vcpu               59 arch/arm/include/asm/kvm_asm.h extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
vcpu               64 arch/arm/include/asm/kvm_asm.h static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
vcpu               66 arch/arm/include/asm/kvm_asm.h extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
vcpu               10 arch/arm/include/asm/kvm_coproc.h void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
vcpu               19 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               20 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               21 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               22 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               23 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               24 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               25 arch/arm/include/asm/kvm_coproc.h int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               27 arch/arm/include/asm/kvm_coproc.h unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
vcpu               28 arch/arm/include/asm/kvm_coproc.h int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
vcpu               32 arch/arm/include/asm/kvm_coproc.h int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
vcpu               33 arch/arm/include/asm/kvm_coproc.h int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu               34 arch/arm/include/asm/kvm_coproc.h int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu               35 arch/arm/include/asm/kvm_coproc.h unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
vcpu               37 arch/arm/include/asm/kvm_emulate.h unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
vcpu               39 arch/arm/include/asm/kvm_emulate.h static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
vcpu               41 arch/arm/include/asm/kvm_emulate.h 	return vcpu_reg(vcpu, reg_num);
vcpu               44 arch/arm/include/asm/kvm_emulate.h unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
vcpu               46 arch/arm/include/asm/kvm_emulate.h static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
vcpu               48 arch/arm/include/asm/kvm_emulate.h 	return *__vcpu_spsr(vcpu);
vcpu               51 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
vcpu               53 arch/arm/include/asm/kvm_emulate.h 	*__vcpu_spsr(vcpu) = v;
vcpu               61 arch/arm/include/asm/kvm_emulate.h static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
vcpu               64 arch/arm/include/asm/kvm_emulate.h 	return *vcpu_reg(vcpu, reg_num);
vcpu               67 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
vcpu               70 arch/arm/include/asm/kvm_emulate.h 	*vcpu_reg(vcpu, reg_num) = val;
vcpu               73 arch/arm/include/asm/kvm_emulate.h bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
vcpu               74 arch/arm/include/asm/kvm_emulate.h void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
vcpu               75 arch/arm/include/asm/kvm_emulate.h void kvm_inject_undef32(struct kvm_vcpu *vcpu);
vcpu               76 arch/arm/include/asm/kvm_emulate.h void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               77 arch/arm/include/asm/kvm_emulate.h void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               78 arch/arm/include/asm/kvm_emulate.h void kvm_inject_vabt(struct kvm_vcpu *vcpu);
vcpu               80 arch/arm/include/asm/kvm_emulate.h static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
vcpu               82 arch/arm/include/asm/kvm_emulate.h 	kvm_inject_undef32(vcpu);
vcpu               85 arch/arm/include/asm/kvm_emulate.h static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu               87 arch/arm/include/asm/kvm_emulate.h 	kvm_inject_dabt32(vcpu, addr);
vcpu               90 arch/arm/include/asm/kvm_emulate.h static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu               92 arch/arm/include/asm/kvm_emulate.h 	kvm_inject_pabt32(vcpu, addr);
vcpu               95 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
vcpu               97 arch/arm/include/asm/kvm_emulate.h 	return kvm_condition_valid32(vcpu);
vcpu              100 arch/arm/include/asm/kvm_emulate.h static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
vcpu              102 arch/arm/include/asm/kvm_emulate.h 	kvm_skip_instr32(vcpu, is_wide_instr);
vcpu              105 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu              107 arch/arm/include/asm/kvm_emulate.h 	vcpu->arch.hcr = HCR_GUEST_MASK;
vcpu              110 arch/arm/include/asm/kvm_emulate.h static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
vcpu              112 arch/arm/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu->arch.hcr;
vcpu              115 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
vcpu              117 arch/arm/include/asm/kvm_emulate.h 	vcpu->arch.hcr &= ~HCR_TWE;
vcpu              120 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
vcpu              122 arch/arm/include/asm/kvm_emulate.h 	vcpu->arch.hcr |= HCR_TWE;
vcpu              125 arch/arm/include/asm/kvm_emulate.h static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
vcpu              130 arch/arm/include/asm/kvm_emulate.h static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
vcpu              132 arch/arm/include/asm/kvm_emulate.h 	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
vcpu              135 arch/arm/include/asm/kvm_emulate.h static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
vcpu              137 arch/arm/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
vcpu              140 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
vcpu              142 arch/arm/include/asm/kvm_emulate.h 	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
vcpu              145 arch/arm/include/asm/kvm_emulate.h static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
vcpu              147 arch/arm/include/asm/kvm_emulate.h 	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
vcpu              151 arch/arm/include/asm/kvm_emulate.h static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
vcpu              153 arch/arm/include/asm/kvm_emulate.h 	unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
vcpu              157 arch/arm/include/asm/kvm_emulate.h static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
vcpu              159 arch/arm/include/asm/kvm_emulate.h 	return vcpu->arch.fault.hsr;
vcpu              162 arch/arm/include/asm/kvm_emulate.h static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
vcpu              164 arch/arm/include/asm/kvm_emulate.h 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu              172 arch/arm/include/asm/kvm_emulate.h static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
vcpu              174 arch/arm/include/asm/kvm_emulate.h 	return vcpu->arch.fault.hxfar;
vcpu              177 arch/arm/include/asm/kvm_emulate.h static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
vcpu              179 arch/arm/include/asm/kvm_emulate.h 	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
vcpu              182 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
vcpu              184 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
vcpu              187 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
vcpu              189 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
vcpu              192 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
vcpu              194 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
vcpu              197 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
vcpu              202 arch/arm/include/asm/kvm_emulate.h static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
vcpu              204 arch/arm/include/asm/kvm_emulate.h 	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
vcpu              207 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
vcpu              209 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
vcpu              212 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
vcpu              214 arch/arm/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
vcpu              218 arch/arm/include/asm/kvm_emulate.h static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
vcpu              220 arch/arm/include/asm/kvm_emulate.h 	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
vcpu              234 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
vcpu              236 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
vcpu              239 arch/arm/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
vcpu              241 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
vcpu              244 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
vcpu              246 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
vcpu              249 arch/arm/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
vcpu              251 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
vcpu              254 arch/arm/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
vcpu              256 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
vcpu              259 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
vcpu              261 arch/arm/include/asm/kvm_emulate.h 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
vcpu              278 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
vcpu              280 arch/arm/include/asm/kvm_emulate.h 	if (kvm_vcpu_trap_is_iabt(vcpu))
vcpu              283 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_dabt_iswrite(vcpu);
vcpu              286 arch/arm/include/asm/kvm_emulate.h static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
vcpu              288 arch/arm/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
vcpu              291 arch/arm/include/asm/kvm_emulate.h static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
vcpu              293 arch/arm/include/asm/kvm_emulate.h 	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
vcpu              296 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
vcpu              301 arch/arm/include/asm/kvm_emulate.h static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
vcpu              306 arch/arm/include/asm/kvm_emulate.h static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
vcpu              308 arch/arm/include/asm/kvm_emulate.h 	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
vcpu              311 arch/arm/include/asm/kvm_emulate.h static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
vcpu              313 arch/arm/include/asm/kvm_emulate.h 	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
vcpu              316 arch/arm/include/asm/kvm_emulate.h static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
vcpu              320 arch/arm/include/asm/kvm_emulate.h 	if (kvm_vcpu_is_be(vcpu)) {
vcpu              341 arch/arm/include/asm/kvm_emulate.h static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
vcpu              345 arch/arm/include/asm/kvm_emulate.h 	if (kvm_vcpu_is_be(vcpu)) {
vcpu              366 arch/arm/include/asm/kvm_emulate.h static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
vcpu              367 arch/arm/include/asm/kvm_emulate.h static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
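
The kvm_vcpu_dabt_*() and kvm_vcpu_trap_*() helpers listed above all pull individual fields out of the saved HSR. Below is a minimal stand-alone sketch of that decoding, compiled outside the kernel: the bit positions follow the ARMv7 HSR/ISS layout, the constant names mirror arch/arm/include/asm/kvm_arm.h but their values are spelled out here as assumptions, and the example HSR value is made up.

#include <stdint.h>
#include <stdio.h>

#define HSR_EC_SHIFT	26		/* exception class, HSR[31:26]      */
#define HSR_IL		(1u << 25)	/* 32-bit (not 16-bit) instruction  */
#define HSR_ISV		(1u << 24)	/* rest of the ISS is valid         */
#define HSR_SRT_SHIFT	16
#define HSR_SRT_MASK	(0xfu << HSR_SRT_SHIFT)
#define HSR_WNR		(1u << 6)	/* write, not read                  */
#define HSR_FSC		0x3fu		/* fault status code                */

int main(void)
{
	uint32_t hsr = 0x93430046;	/* made-up data-abort HSR value */

	printf("EC      : 0x%02x\n", (unsigned)(hsr >> HSR_EC_SHIFT));
	printf("32-bit  : %s\n", (hsr & HSR_IL) ? "yes" : "no");
	printf("ISS ok  : %s\n", (hsr & HSR_ISV) ? "yes" : "no");
	printf("Rt      : %u\n", (unsigned)((hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT));
	printf("write   : %s\n", (hsr & HSR_WNR) ? "yes" : "no");
	/* access size, HSR[23:22]: 0 -> byte, 1 -> half, 2 -> word;
	 * the remaining encoding is treated as an error by the kernel. */
	printf("bytes   : %u\n", 1u << ((hsr >> 22) & 0x3));
	printf("FSC     : 0x%02x\n", (unsigned)(hsr & HSR_FSC));
	return 0;
}
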
vcpu               46 arch/arm/include/asm/kvm_host.h u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
vcpu               48 arch/arm/include/asm/kvm_host.h int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
vcpu               49 arch/arm/include/asm/kvm_host.h void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
vcpu              227 arch/arm/include/asm/kvm_host.h unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
vcpu              228 arch/arm/include/asm/kvm_host.h int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
vcpu              229 arch/arm/include/asm/kvm_host.h int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu              230 arch/arm/include/asm/kvm_host.h int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu              261 arch/arm/include/asm/kvm_host.h int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
vcpu              264 arch/arm/include/asm/kvm_host.h int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
vcpu              272 arch/arm/include/asm/kvm_host.h unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
vcpu              273 arch/arm/include/asm/kvm_host.h int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
vcpu              282 arch/arm/include/asm/kvm_host.h int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
vcpu              283 arch/arm/include/asm/kvm_host.h unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
vcpu              284 arch/arm/include/asm/kvm_host.h int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu              285 arch/arm/include/asm/kvm_host.h int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu              287 arch/arm/include/asm/kvm_host.h int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              290 arch/arm/include/asm/kvm_host.h static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              333 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
vcpu              334 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
vcpu              335 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
vcpu              338 arch/arm/include/asm/kvm_host.h static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
vcpu              339 arch/arm/include/asm/kvm_host.h static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
vcpu              340 arch/arm/include/asm/kvm_host.h static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
vcpu              342 arch/arm/include/asm/kvm_host.h int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
vcpu              344 arch/arm/include/asm/kvm_host.h int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
vcpu              346 arch/arm/include/asm/kvm_host.h int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
vcpu              353 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
vcpu              354 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
vcpu              355 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
vcpu              357 arch/arm/include/asm/kvm_host.h static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
vcpu              358 arch/arm/include/asm/kvm_host.h static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
vcpu              396 arch/arm/include/asm/kvm_host.h static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
vcpu              397 arch/arm/include/asm/kvm_host.h static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
vcpu              414 arch/arm/include/asm/kvm_host.h static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
vcpu              419 arch/arm/include/asm/kvm_host.h static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
vcpu              424 arch/arm/include/asm/kvm_host.h #define kvm_arm_vcpu_loaded(vcpu)	(false)
vcpu               96 arch/arm/include/asm/kvm_hyp.h void __timer_enable_traps(struct kvm_vcpu *vcpu);
vcpu               97 arch/arm/include/asm/kvm_hyp.h void __timer_disable_traps(struct kvm_vcpu *vcpu);
vcpu               99 arch/arm/include/asm/kvm_hyp.h void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
vcpu              100 arch/arm/include/asm/kvm_hyp.h void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
vcpu              105 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
vcpu              106 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
vcpu              107 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
vcpu              108 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
vcpu              109 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
vcpu              110 arch/arm/include/asm/kvm_hyp.h void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
vcpu              122 arch/arm/include/asm/kvm_hyp.h asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
vcpu               24 arch/arm/include/asm/kvm_mmio.h int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               25 arch/arm/include/asm/kvm_mmio.h int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu               60 arch/arm/include/asm/kvm_mmu.h int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               62 arch/arm/include/asm/kvm_mmu.h void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
vcpu              206 arch/arm/include/asm/kvm_mmu.h static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
vcpu              208 arch/arm/include/asm/kvm_mmu.h 	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
vcpu              333 arch/arm/include/asm/kvm_mmu.h void kvm_set_way_flush(struct kvm_vcpu *vcpu);
vcpu              334 arch/arm/include/asm/kvm_mmu.h void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
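
The 0b101 mask in vcpu_has_cache_enabled() above tests SCTLR.M (bit 0, stage-1 MMU enable) and SCTLR.C (bit 2, data-cache enable) together; kvm_toggle_cache() is driven by transitions of that combined state. A tiny stand-alone restatement of the check, with hypothetical SCTLR values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* SCTLR.M is bit 0 (MMU enable), SCTLR.C is bit 2 (data cache enable). */
static bool cache_enabled(uint32_t sctlr)
{
	return (sctlr & 0x5u) == 0x5u;
}

int main(void)
{
	assert(cache_enabled(0x00c5187d));	/* M=1, C=1: caches usable   */
	assert(!cache_enabled(0x00c5187c));	/* MMU off: treated as off   */
	return 0;
}
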
vcpu               32 arch/arm/kvm/coproc.c static bool write_to_read_only(struct kvm_vcpu *vcpu,
vcpu               37 arch/arm/kvm/coproc.c 	kvm_inject_undefined(vcpu);
vcpu               41 arch/arm/kvm/coproc.c static bool read_from_write_only(struct kvm_vcpu *vcpu,
vcpu               46 arch/arm/kvm/coproc.c 	kvm_inject_undefined(vcpu);
vcpu               62 arch/arm/kvm/coproc.c static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
vcpu               66 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
vcpu               67 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
vcpu               70 arch/arm/kvm/coproc.c static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
vcpu               75 arch/arm/kvm/coproc.c 	val = vcpu_cp15(vcpu, r->reg + 1);
vcpu               77 arch/arm/kvm/coproc.c 	val = val | vcpu_cp15(vcpu, r->reg);
vcpu               81 arch/arm/kvm/coproc.c int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               83 arch/arm/kvm/coproc.c 	kvm_inject_undefined(vcpu);
vcpu               87 arch/arm/kvm/coproc.c int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               93 arch/arm/kvm/coproc.c 	kvm_inject_undefined(vcpu);
vcpu               97 arch/arm/kvm/coproc.c int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               99 arch/arm/kvm/coproc.c 	kvm_inject_undefined(vcpu);
vcpu              103 arch/arm/kvm/coproc.c static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
vcpu              110 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
vcpu              111 arch/arm/kvm/coproc.c 				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
vcpu              112 arch/arm/kvm/coproc.c 				     (vcpu->vcpu_id & 3));
vcpu              116 arch/arm/kvm/coproc.c static bool access_actlr(struct kvm_vcpu *vcpu,
vcpu              121 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
vcpu              123 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
vcpu              128 arch/arm/kvm/coproc.c static bool access_cbar(struct kvm_vcpu *vcpu,
vcpu              133 arch/arm/kvm/coproc.c 		return write_to_read_only(vcpu, p);
vcpu              134 arch/arm/kvm/coproc.c 	return read_zero(vcpu, p);
vcpu              138 arch/arm/kvm/coproc.c static bool access_l2ctlr(struct kvm_vcpu *vcpu,
vcpu              143 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
vcpu              145 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
vcpu              149 arch/arm/kvm/coproc.c static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
vcpu              155 arch/arm/kvm/coproc.c 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
vcpu              157 arch/arm/kvm/coproc.c 	ncores -= (vcpu->vcpu_id & ~3);
vcpu              162 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
vcpu              165 arch/arm/kvm/coproc.c static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
vcpu              172 arch/arm/kvm/coproc.c 	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
vcpu              177 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
vcpu              184 arch/arm/kvm/coproc.c static bool access_l2ectlr(struct kvm_vcpu *vcpu,
vcpu              189 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
vcpu              191 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = 0;
vcpu              198 arch/arm/kvm/coproc.c static bool access_dcsw(struct kvm_vcpu *vcpu,
vcpu              203 arch/arm/kvm/coproc.c 		return read_from_write_only(vcpu, p);
vcpu              205 arch/arm/kvm/coproc.c 	kvm_set_way_flush(vcpu);
vcpu              216 arch/arm/kvm/coproc.c bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu              220 arch/arm/kvm/coproc.c 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
vcpu              224 arch/arm/kvm/coproc.c 	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
vcpu              226 arch/arm/kvm/coproc.c 		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
vcpu              228 arch/arm/kvm/coproc.c 	kvm_toggle_cache(vcpu, was_enabled);
vcpu              232 arch/arm/kvm/coproc.c static bool access_gic_sgi(struct kvm_vcpu *vcpu,
vcpu              240 arch/arm/kvm/coproc.c 		return read_from_write_only(vcpu, p);
vcpu              242 arch/arm/kvm/coproc.c 	reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
vcpu              243 arch/arm/kvm/coproc.c 	reg |= *vcpu_reg(vcpu, p->Rt1) ;
vcpu              263 arch/arm/kvm/coproc.c 	vgic_v3_dispatch_sgi(vcpu, reg, g1);
vcpu              268 arch/arm/kvm/coproc.c static bool access_gic_sre(struct kvm_vcpu *vcpu,
vcpu              273 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
vcpu              275 arch/arm/kvm/coproc.c 	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
vcpu              280 arch/arm/kvm/coproc.c static bool access_cntp_tval(struct kvm_vcpu *vcpu,
vcpu              287 arch/arm/kvm/coproc.c 		val = *vcpu_reg(vcpu, p->Rt1);
vcpu              288 arch/arm/kvm/coproc.c 		kvm_arm_timer_write_sysreg(vcpu,
vcpu              291 arch/arm/kvm/coproc.c 		val = kvm_arm_timer_read_sysreg(vcpu,
vcpu              293 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
vcpu              299 arch/arm/kvm/coproc.c static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
vcpu              306 arch/arm/kvm/coproc.c 		val = *vcpu_reg(vcpu, p->Rt1);
vcpu              307 arch/arm/kvm/coproc.c 		kvm_arm_timer_write_sysreg(vcpu,
vcpu              310 arch/arm/kvm/coproc.c 		val = kvm_arm_timer_read_sysreg(vcpu,
vcpu              312 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
vcpu              318 arch/arm/kvm/coproc.c static bool access_cntp_cval(struct kvm_vcpu *vcpu,
vcpu              325 arch/arm/kvm/coproc.c 		val = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
vcpu              326 arch/arm/kvm/coproc.c 		val |= *vcpu_reg(vcpu, p->Rt1);
vcpu              327 arch/arm/kvm/coproc.c 		kvm_arm_timer_write_sysreg(vcpu,
vcpu              330 arch/arm/kvm/coproc.c 		val = kvm_arm_timer_read_sysreg(vcpu,
vcpu              332 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt1) = val;
vcpu              333 arch/arm/kvm/coproc.c 		*vcpu_reg(vcpu, p->Rt2) = val >> 32;
vcpu              348 arch/arm/kvm/coproc.c static bool trap_raz_wi(struct kvm_vcpu *vcpu,
vcpu              353 arch/arm/kvm/coproc.c 		return ignore_write(vcpu, p);
vcpu              355 arch/arm/kvm/coproc.c 		return read_zero(vcpu, p);
vcpu              572 arch/arm/kvm/coproc.c static int emulate_cp15(struct kvm_vcpu *vcpu,
vcpu              581 arch/arm/kvm/coproc.c 	table = get_target_table(vcpu->arch.target, &num);
vcpu              592 arch/arm/kvm/coproc.c 		if (likely(r->access(vcpu, params, r))) {
vcpu              594 arch/arm/kvm/coproc.c 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              599 arch/arm/kvm/coproc.c 			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
vcpu              601 arch/arm/kvm/coproc.c 		kvm_inject_undefined(vcpu);
vcpu              607 arch/arm/kvm/coproc.c static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
vcpu              611 arch/arm/kvm/coproc.c 	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
vcpu              612 arch/arm/kvm/coproc.c 	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
vcpu              613 arch/arm/kvm/coproc.c 	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
vcpu              616 arch/arm/kvm/coproc.c 	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
vcpu              618 arch/arm/kvm/coproc.c 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
vcpu              629 arch/arm/kvm/coproc.c int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              631 arch/arm/kvm/coproc.c 	struct coproc_params params = decode_64bit_hsr(vcpu);
vcpu              633 arch/arm/kvm/coproc.c 	return emulate_cp15(vcpu, &params);
vcpu              641 arch/arm/kvm/coproc.c int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              643 arch/arm/kvm/coproc.c 	struct coproc_params params = decode_64bit_hsr(vcpu);
vcpu              646 arch/arm/kvm/coproc.c 	trap_raz_wi(vcpu, &params, NULL);
vcpu              649 arch/arm/kvm/coproc.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              653 arch/arm/kvm/coproc.c static void reset_coproc_regs(struct kvm_vcpu *vcpu,
vcpu              663 arch/arm/kvm/coproc.c 			table[i].reset(vcpu, &table[i]);
vcpu              672 arch/arm/kvm/coproc.c static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
vcpu              676 arch/arm/kvm/coproc.c 	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
vcpu              677 arch/arm/kvm/coproc.c 	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
vcpu              678 arch/arm/kvm/coproc.c 	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
vcpu              681 arch/arm/kvm/coproc.c 	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
vcpu              682 arch/arm/kvm/coproc.c 	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
vcpu              683 arch/arm/kvm/coproc.c 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
vcpu              694 arch/arm/kvm/coproc.c int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              696 arch/arm/kvm/coproc.c 	struct coproc_params params = decode_32bit_hsr(vcpu);
vcpu              697 arch/arm/kvm/coproc.c 	return emulate_cp15(vcpu, &params);
vcpu              705 arch/arm/kvm/coproc.c int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              707 arch/arm/kvm/coproc.c 	struct coproc_params params = decode_32bit_hsr(vcpu);
vcpu              710 arch/arm/kvm/coproc.c 	trap_raz_wi(vcpu, &params, NULL);
vcpu              713 arch/arm/kvm/coproc.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              766 arch/arm/kvm/coproc.c static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
vcpu              780 arch/arm/kvm/coproc.c 	table = get_target_table(vcpu->arch.target, &num);
vcpu             1086 arch/arm/kvm/coproc.c static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
vcpu             1099 arch/arm/kvm/coproc.c 		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
vcpu             1109 arch/arm/kvm/coproc.c 		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
vcpu             1111 arch/arm/kvm/coproc.c 		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
vcpu             1113 arch/arm/kvm/coproc.c 		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
vcpu             1115 arch/arm/kvm/coproc.c 		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
vcpu             1130 arch/arm/kvm/coproc.c static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
vcpu             1143 arch/arm/kvm/coproc.c 		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
vcpu             1153 arch/arm/kvm/coproc.c 		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
vcpu             1155 arch/arm/kvm/coproc.c 		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
vcpu             1157 arch/arm/kvm/coproc.c 		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
vcpu             1159 arch/arm/kvm/coproc.c 		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
vcpu             1194 arch/arm/kvm/coproc.c static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
vcpu             1199 arch/arm/kvm/coproc.c static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
vcpu             1205 arch/arm/kvm/coproc.c int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu             1215 arch/arm/kvm/coproc.c 		return vfp_get_reg(vcpu, reg->id, uaddr);
vcpu             1217 arch/arm/kvm/coproc.c 	r = index_to_coproc_reg(vcpu, reg->id);
vcpu             1225 arch/arm/kvm/coproc.c 		val = vcpu_cp15_reg64_get(vcpu, r);
vcpu             1228 arch/arm/kvm/coproc.c 		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
vcpu             1234 arch/arm/kvm/coproc.c int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu             1244 arch/arm/kvm/coproc.c 		return vfp_set_reg(vcpu, reg->id, uaddr);
vcpu             1246 arch/arm/kvm/coproc.c 	r = index_to_coproc_reg(vcpu, reg->id);
vcpu             1256 arch/arm/kvm/coproc.c 			vcpu_cp15_reg64_set(vcpu, r, val);
vcpu             1258 arch/arm/kvm/coproc.c 		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
vcpu             1328 arch/arm/kvm/coproc.c static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
vcpu             1335 arch/arm/kvm/coproc.c 	i1 = get_target_table(vcpu->arch.target, &num);
vcpu             1370 arch/arm/kvm/coproc.c unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
vcpu             1375 arch/arm/kvm/coproc.c 		+ walk_cp15(vcpu, (u64 __user *)NULL);
vcpu             1378 arch/arm/kvm/coproc.c int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu             1390 arch/arm/kvm/coproc.c 	err = walk_cp15(vcpu, uindices);
vcpu             1440 arch/arm/kvm/coproc.c void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
vcpu             1447 arch/arm/kvm/coproc.c 	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
vcpu             1449 arch/arm/kvm/coproc.c 	table = get_target_table(vcpu->arch.target, &num);
vcpu             1450 arch/arm/kvm/coproc.c 	reset_coproc_regs(vcpu, table, num, bmap);
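
decode_32bit_hsr() above recovers the MCR/MRC operands directly from HSR bit fields. The following self-contained sketch performs the same extraction using the shift/mask values visible in the listing; the example HSR value, and the DCCISW-style encoding it stands for, are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct coproc_params {
	unsigned long CRn, CRm, Op1, Op2, Rt1;
	bool is_write;
};

static struct coproc_params decode_32bit_hsr(uint32_t hsr)
{
	struct coproc_params p;

	p.CRm      = (hsr >> 1) & 0xf;
	p.Rt1      = (hsr >> 5) & 0xf;
	p.is_write = (hsr & 1) == 0;	/* bit 0 clear: guest wrote to cp15 */
	p.CRn      = (hsr >> 10) & 0xf;
	p.Op1      = (hsr >> 14) & 0x7;
	p.Op2      = (hsr >> 17) & 0x7;
	return p;
}

int main(void)
{
	/* hypothetical trap on "mcr p15, 0, r3, c7, c14, 2" (DCCISW-like) */
	struct coproc_params p = decode_32bit_hsr(0x0e041c7c);

	printf("p15, %lu, r%lu, c%lu, c%lu, %lu (%s)\n",
	       p.Op1, p.Rt1, p.CRn, p.CRm, p.Op2,
	       p.is_write ? "write" : "read");
	return 0;
}
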
vcpu               59 arch/arm/kvm/coproc.h static inline bool ignore_write(struct kvm_vcpu *vcpu,
vcpu               65 arch/arm/kvm/coproc.h static inline bool read_zero(struct kvm_vcpu *vcpu,
vcpu               68 arch/arm/kvm/coproc.h 	*vcpu_reg(vcpu, p->Rt1) = 0;
vcpu               73 arch/arm/kvm/coproc.h static inline void reset_unknown(struct kvm_vcpu *vcpu,
vcpu               77 arch/arm/kvm/coproc.h 	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu               78 arch/arm/kvm/coproc.h 	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
vcpu               81 arch/arm/kvm/coproc.h static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
vcpu               84 arch/arm/kvm/coproc.h 	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu               85 arch/arm/kvm/coproc.h 	vcpu_cp15(vcpu, r->reg) = r->val;
vcpu               88 arch/arm/kvm/coproc.h static inline void reset_unknown64(struct kvm_vcpu *vcpu,
vcpu               92 arch/arm/kvm/coproc.h 	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu               94 arch/arm/kvm/coproc.h 	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
vcpu               95 arch/arm/kvm/coproc.h 	vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
vcpu              126 arch/arm/kvm/coproc.h bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu              101 arch/arm/kvm/emulate.c unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
vcpu              103 arch/arm/kvm/emulate.c 	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
vcpu              104 arch/arm/kvm/emulate.c 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
vcpu              133 arch/arm/kvm/emulate.c unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
vcpu              135 arch/arm/kvm/emulate.c 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
vcpu              138 arch/arm/kvm/emulate.c 		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
vcpu              140 arch/arm/kvm/emulate.c 		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
vcpu              142 arch/arm/kvm/emulate.c 		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
vcpu              144 arch/arm/kvm/emulate.c 		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
vcpu              146 arch/arm/kvm/emulate.c 		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
vcpu              163 arch/arm/kvm/emulate.c void kvm_inject_vabt(struct kvm_vcpu *vcpu)
vcpu              165 arch/arm/kvm/emulate.c 	*vcpu_hcr(vcpu) |= HCR_VA;
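
vcpu_reg() and __vcpu_spsr() above both key off the mode field in the low five bits of the CPSR to pick the right banked copy. A stand-alone sketch of that selection follows; the mode numbers are the architectural ARM encodings (SVC 0x13, ABT 0x17, UND 0x1b, IRQ 0x12, FIQ 0x11, MODE_MASK 0x1f), and the example CPSR value is made up.

#include <stdio.h>

#define MODE_MASK 0x1fUL

static const char *spsr_bank(unsigned long cpsr)
{
	switch (cpsr & MODE_MASK) {
	case 0x13: return "SPSR_svc";
	case 0x17: return "SPSR_abt";
	case 0x1b: return "SPSR_und";
	case 0x12: return "SPSR_irq";
	case 0x11: return "SPSR_fiq";
	case 0x10:			/* User   */
	case 0x1f:			/* System */
	default:   return NULL;		/* no banked SPSR in these modes */
	}
}

int main(void)
{
	unsigned long cpsr = 0x600001d3;	/* hypothetical SVC-mode CPSR */
	const char *bank = spsr_bank(cpsr);

	printf("mode 0x%02lx -> %s\n", cpsr & MODE_MASK, bank ? bank : "none");
	return 0;
}
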
vcpu               33 arch/arm/kvm/guest.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu               43 arch/arm/kvm/guest.c static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu               46 arch/arm/kvm/guest.c 	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
vcpu               60 arch/arm/kvm/guest.c static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu               63 arch/arm/kvm/guest.c 	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
vcpu               96 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              101 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              119 arch/arm/kvm/guest.c static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu              133 arch/arm/kvm/guest.c static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              143 arch/arm/kvm/guest.c 	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
vcpu              146 arch/arm/kvm/guest.c static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              151 arch/arm/kvm/guest.c 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
vcpu              165 arch/arm/kvm/guest.c unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
vcpu              167 arch/arm/kvm/guest.c 	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
vcpu              168 arch/arm/kvm/guest.c 		+ kvm_arm_get_fw_num_regs(vcpu)
vcpu              177 arch/arm/kvm/guest.c int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu              189 arch/arm/kvm/guest.c 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
vcpu              192 arch/arm/kvm/guest.c 	uindices += kvm_arm_get_fw_num_regs(vcpu);
vcpu              194 arch/arm/kvm/guest.c 	ret = copy_timer_indices(vcpu, uindices);
vcpu              199 arch/arm/kvm/guest.c 	return kvm_arm_copy_coproc_indices(vcpu, uindices);
vcpu              202 arch/arm/kvm/guest.c int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              210 arch/arm/kvm/guest.c 		return get_core_reg(vcpu, reg);
vcpu              213 arch/arm/kvm/guest.c 		return kvm_arm_get_fw_reg(vcpu, reg);
vcpu              216 arch/arm/kvm/guest.c 		return get_timer_reg(vcpu, reg);
vcpu              218 arch/arm/kvm/guest.c 	return kvm_arm_coproc_get_reg(vcpu, reg);
vcpu              221 arch/arm/kvm/guest.c int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              229 arch/arm/kvm/guest.c 		return set_core_reg(vcpu, reg);
vcpu              232 arch/arm/kvm/guest.c 		return kvm_arm_set_fw_reg(vcpu, reg);
vcpu              235 arch/arm/kvm/guest.c 		return set_timer_reg(vcpu, reg);
vcpu              237 arch/arm/kvm/guest.c 	return kvm_arm_coproc_set_reg(vcpu, reg);
vcpu              240 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu              246 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu              253 arch/arm/kvm/guest.c int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
vcpu              256 arch/arm/kvm/guest.c 	events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
vcpu              261 arch/arm/kvm/guest.c int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
vcpu              270 arch/arm/kvm/guest.c 		kvm_inject_vabt(vcpu);
vcpu              307 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              312 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              317 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu              323 arch/arm/kvm/guest.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu              329 arch/arm/kvm/guest.c int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
vcpu              336 arch/arm/kvm/guest.c 		ret = kvm_arm_timer_set_attr(vcpu, attr);
vcpu              346 arch/arm/kvm/guest.c int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
vcpu              353 arch/arm/kvm/guest.c 		ret = kvm_arm_timer_get_attr(vcpu, attr);
vcpu              363 arch/arm/kvm/guest.c int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
vcpu              370 arch/arm/kvm/guest.c 		ret = kvm_arm_timer_has_attr(vcpu, attr);
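
kvm_arm_num_regs() and kvm_arm_copy_reg_indices() above back the KVM_GET_REG_LIST ioctl, while kvm_arm_get_reg()/kvm_arm_set_reg() back KVM_GET_ONE_REG and KVM_SET_ONE_REG. Below is a hedged user-space sketch of the usual two-call pattern for enumerating the register indices; creating /dev/kvm, the VM and the vCPU fd is assumed to have happened elsewhere. The first call deliberately passes a too-small count so the kernel reports how many indices it has (failing with E2BIG), and the second call fetches them.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_reg_ids(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	__u64 i;

	/* First call: n too small, the kernel stores the required count. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return -1;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return -1;
	list->n = probe.n;

	/* Second call: buffer is large enough, indices get filled in. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return -1;
	}

	for (i = 0; i < list->n; i++)
		printf("reg id 0x%016llx\n", (unsigned long long)list->reg[i]);

	free(list);
	return 0;
}
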
vcpu               19 arch/arm/kvm/handle_exit.c static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               23 arch/arm/kvm/handle_exit.c 	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
vcpu               24 arch/arm/kvm/handle_exit.c 		      kvm_vcpu_hvc_get_imm(vcpu));
vcpu               25 arch/arm/kvm/handle_exit.c 	vcpu->stat.hvc_exit_stat++;
vcpu               27 arch/arm/kvm/handle_exit.c 	ret = kvm_hvc_call_handler(vcpu);
vcpu               29 arch/arm/kvm/handle_exit.c 		vcpu_set_reg(vcpu, 0, ~0UL);
vcpu               36 arch/arm/kvm/handle_exit.c static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               46 arch/arm/kvm/handle_exit.c 	vcpu_set_reg(vcpu, 0, ~0UL);
vcpu               47 arch/arm/kvm/handle_exit.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu               62 arch/arm/kvm/handle_exit.c static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               64 arch/arm/kvm/handle_exit.c 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
vcpu               65 arch/arm/kvm/handle_exit.c 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
vcpu               66 arch/arm/kvm/handle_exit.c 		vcpu->stat.wfe_exit_stat++;
vcpu               67 arch/arm/kvm/handle_exit.c 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
vcpu               69 arch/arm/kvm/handle_exit.c 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
vcpu               70 arch/arm/kvm/handle_exit.c 		vcpu->stat.wfi_exit_stat++;
vcpu               71 arch/arm/kvm/handle_exit.c 		kvm_vcpu_block(vcpu);
vcpu               72 arch/arm/kvm/handle_exit.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu               75 arch/arm/kvm/handle_exit.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu               80 arch/arm/kvm/handle_exit.c static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               82 arch/arm/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu               87 arch/arm/kvm/handle_exit.c 	kvm_inject_undefined(vcpu);
vcpu              107 arch/arm/kvm/handle_exit.c static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
vcpu              109 arch/arm/kvm/handle_exit.c 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
vcpu              118 arch/arm/kvm/handle_exit.c int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              124 arch/arm/kvm/handle_exit.c 		u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
vcpu              132 arch/arm/kvm/handle_exit.c 			u32 adj =  kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
vcpu              133 arch/arm/kvm/handle_exit.c 			*vcpu_pc(vcpu) -= adj;
vcpu              136 arch/arm/kvm/handle_exit.c 		kvm_inject_vabt(vcpu);
vcpu              150 arch/arm/kvm/handle_exit.c 		if (!kvm_condition_valid(vcpu)) {
vcpu              151 arch/arm/kvm/handle_exit.c 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              155 arch/arm/kvm/handle_exit.c 		exit_handler = kvm_get_exit_handler(vcpu);
vcpu              157 arch/arm/kvm/handle_exit.c 		return exit_handler(vcpu, run);
vcpu              159 arch/arm/kvm/handle_exit.c 		kvm_inject_vabt(vcpu);
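
kvm_get_exit_handler() above indexes a handler table by the exception class held in HSR[31:26], with classes that have no dedicated handler falling back to kvm_handle_unknown_ec(). A compact stand-alone sketch of that dispatch pattern follows; the two EC values spelled out (0x12 for HVC from AArch32, 0x24 for a data abort) are the architectural encodings, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define HSR_EC_SHIFT	26
#define HSR_EC_MAX	0x3f

typedef int (*exit_handle_fn)(uint32_t hsr);

static int handle_unknown(uint32_t hsr)
{
	printf("unknown exception class 0x%02x -> inject undefined\n",
	       (unsigned)(hsr >> HSR_EC_SHIFT));
	return 1;	/* 1 means "resume the guest" */
}

static int handle_hvc(uint32_t hsr)
{
	printf("HVC #%u\n", (unsigned)(hsr & 0xffff));	/* imm16 of the HVC */
	return 1;
}

static int handle_dabt(uint32_t hsr)
{
	printf("data abort\n");
	return 1;
}

static exit_handle_fn arm_exit_handlers[HSR_EC_MAX + 1] = {
	[0x12] = handle_hvc,	/* HSR_EC_HVC  */
	[0x24] = handle_dabt,	/* HSR_EC_DABT */
};

static int handle_exit(uint32_t hsr)
{
	exit_handle_fn fn = arm_exit_handlers[hsr >> HSR_EC_SHIFT];

	return fn ? fn(hsr) : handle_unknown(hsr);
}

int main(void)
{
	handle_exit(0x4a000016);	/* made-up HVC trap, imm16 = 0x16 */
	handle_exit(0x93430046);	/* made-up data abort             */
	return 0;
}

Keeping a sparse table with a NULL check (rather than pre-filling every slot) is an illustrative choice here; the kernel's table instead fills unused slots with the unknown-EC handler so the lookup never needs the fallback branch.
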
vcpu               18 arch/arm/kvm/hyp/switch.c static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
vcpu               36 arch/arm/kvm/hyp/switch.c 	write_sysreg(vcpu->arch.hcr, HCR);
vcpu               46 arch/arm/kvm/hyp/switch.c static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
vcpu               56 arch/arm/kvm/hyp/switch.c 	if (vcpu->arch.hcr & HCR_VA)
vcpu               57 arch/arm/kvm/hyp/switch.c 		vcpu->arch.hcr = read_sysreg(HCR);
vcpu               66 arch/arm/kvm/hyp/switch.c static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
vcpu               68 arch/arm/kvm/hyp/switch.c 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
vcpu               70 arch/arm/kvm/hyp/switch.c 	write_sysreg(vcpu->arch.midr, VPIDR);
vcpu               73 arch/arm/kvm/hyp/switch.c static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
vcpu               80 arch/arm/kvm/hyp/switch.c static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
vcpu               83 arch/arm/kvm/hyp/switch.c 		__vgic_v3_save_state(vcpu);
vcpu               84 arch/arm/kvm/hyp/switch.c 		__vgic_v3_deactivate_traps(vcpu);
vcpu               88 arch/arm/kvm/hyp/switch.c static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
vcpu               91 arch/arm/kvm/hyp/switch.c 		__vgic_v3_activate_traps(vcpu);
vcpu               92 arch/arm/kvm/hyp/switch.c 		__vgic_v3_restore_state(vcpu);
vcpu               96 arch/arm/kvm/hyp/switch.c static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
vcpu              102 arch/arm/kvm/hyp/switch.c 	vcpu->arch.fault.hsr = hsr;
vcpu              141 arch/arm/kvm/hyp/switch.c 	vcpu->arch.fault.hxfar = far;
vcpu              142 arch/arm/kvm/hyp/switch.c 	vcpu->arch.fault.hpfar = hpfar;
vcpu              146 arch/arm/kvm/hyp/switch.c int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu              154 arch/arm/kvm/hyp/switch.c 	vcpu = kern_hyp_va(vcpu);
vcpu              155 arch/arm/kvm/hyp/switch.c 	write_sysreg(vcpu, HTPIDR);
vcpu              157 arch/arm/kvm/hyp/switch.c 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu              158 arch/arm/kvm/hyp/switch.c 	guest_ctxt = &vcpu->arch.ctxt;
vcpu              163 arch/arm/kvm/hyp/switch.c 	__activate_traps(vcpu, &fpexc);
vcpu              164 arch/arm/kvm/hyp/switch.c 	__activate_vm(vcpu);
vcpu              166 arch/arm/kvm/hyp/switch.c 	__vgic_restore_state(vcpu);
vcpu              167 arch/arm/kvm/hyp/switch.c 	__timer_enable_traps(vcpu);
vcpu              174 arch/arm/kvm/hyp/switch.c 	exit_code = __guest_enter(vcpu, host_ctxt);
vcpu              177 arch/arm/kvm/hyp/switch.c 	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
vcpu              184 arch/arm/kvm/hyp/switch.c 	__timer_disable_traps(vcpu);
vcpu              186 arch/arm/kvm/hyp/switch.c 	__vgic_save_state(vcpu);
vcpu              188 arch/arm/kvm/hyp/switch.c 	__deactivate_traps(vcpu);
vcpu              189 arch/arm/kvm/hyp/switch.c 	__deactivate_vm(vcpu);
vcpu              226 arch/arm/kvm/hyp/switch.c 		struct kvm_vcpu *vcpu;
vcpu              229 arch/arm/kvm/hyp/switch.c 		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
vcpu              230 arch/arm/kvm/hyp/switch.c 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu              231 arch/arm/kvm/hyp/switch.c 		__timer_disable_traps(vcpu);
vcpu              232 arch/arm/kvm/hyp/switch.c 		__deactivate_traps(vcpu);
vcpu              233 arch/arm/kvm/hyp/switch.c 		__deactivate_vm(vcpu);
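
__kvm_vcpu_run_nvhe() above brackets __guest_enter() symmetrically: every piece of world state switched towards the guest before entry is undone in the opposite order after exit. The schematic sketch below restates that bracketing with the hypervisor specifics stubbed out; only the ordering is the point, and the exit code is invented.

#include <stdio.h>

static void step(const char *what) { printf("%s\n", what); }

static int run_guest_once(void)
{
	int exit_code;

	step("save host sysregs");
	step("activate traps + stage-2 translation (HCR, VPIDR/VMPIDR)");
	step("restore guest vgic and timer state");
	step("restore guest sysregs");

	step(">>> __guest_enter()");
	exit_code = 1;			/* pretend exit reason */

	step("save guest sysregs");
	step("save guest timer and vgic state");
	step("deactivate traps + stage-2 translation");
	step("restore host sysregs");
	return exit_code;
}

int main(void)
{
	printf("exit code %d\n", run_guest_once());
	return 0;
}
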
vcpu               48 arch/arm/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
vcpu               50 arch/arm/kvm/hyp/tlb.c 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
vcpu               41 arch/arm/kvm/reset.c int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu               45 arch/arm/kvm/reset.c 	switch (vcpu->arch.target) {
vcpu               49 arch/arm/kvm/reset.c 		vcpu->arch.midr = read_cpuid_id();
vcpu               56 arch/arm/kvm/reset.c 	memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
vcpu               59 arch/arm/kvm/reset.c 	kvm_reset_coprocs(vcpu);
vcpu               65 arch/arm/kvm/reset.c 	if (READ_ONCE(vcpu->arch.reset_state.reset)) {
vcpu               66 arch/arm/kvm/reset.c 		unsigned long target_pc = vcpu->arch.reset_state.pc;
vcpu               71 arch/arm/kvm/reset.c 			vcpu_set_thumb(vcpu);
vcpu               75 arch/arm/kvm/reset.c 		if (vcpu->arch.reset_state.be)
vcpu               76 arch/arm/kvm/reset.c 			kvm_vcpu_set_be(vcpu);
vcpu               78 arch/arm/kvm/reset.c 		*vcpu_pc(vcpu) = target_pc;
vcpu               79 arch/arm/kvm/reset.c 		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
vcpu               81 arch/arm/kvm/reset.c 		vcpu->arch.reset_state.reset = false;
vcpu               85 arch/arm/kvm/reset.c 	return kvm_timer_vcpu_reset(vcpu);
vcpu               11 arch/arm/kvm/vgic-v3-coproc.c int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
vcpu               20 arch/arm/kvm/vgic-v3-coproc.c int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
vcpu               63 arch/arm64/include/asm/kvm_asm.h extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
vcpu               67 arch/arm64/include/asm/kvm_asm.h extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
vcpu               69 arch/arm64/include/asm/kvm_asm.h extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
vcpu              110 arch/arm64/include/asm/kvm_asm.h .macro get_vcpu_ptr vcpu, ctxt
vcpu               15 arch/arm64/include/asm/kvm_coproc.h void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
vcpu               30 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               31 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               32 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               33 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               34 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               35 arch/arm64/include/asm/kvm_coproc.h int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               41 arch/arm64/include/asm/kvm_coproc.h int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
vcpu               42 arch/arm64/include/asm/kvm_coproc.h int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu               43 arch/arm64/include/asm/kvm_coproc.h int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
vcpu               44 arch/arm64/include/asm/kvm_coproc.h unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
vcpu               25 arch/arm64/include/asm/kvm_emulate.h unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
vcpu               26 arch/arm64/include/asm/kvm_emulate.h unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
vcpu               27 arch/arm64/include/asm/kvm_emulate.h void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);
vcpu               29 arch/arm64/include/asm/kvm_emulate.h bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
vcpu               30 arch/arm64/include/asm/kvm_emulate.h void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
vcpu               32 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_undefined(struct kvm_vcpu *vcpu);
vcpu               33 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_vabt(struct kvm_vcpu *vcpu);
vcpu               34 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               35 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               36 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_undef32(struct kvm_vcpu *vcpu);
vcpu               37 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               38 arch/arm64/include/asm/kvm_emulate.h void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu               40 arch/arm64/include/asm/kvm_emulate.h static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
vcpu               42 arch/arm64/include/asm/kvm_emulate.h 	return !(vcpu->arch.hcr_el2 & HCR_RW);
vcpu               45 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu               47 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
vcpu               49 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_E2H;
vcpu               52 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_TEA;
vcpu               54 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_TERR;
vcpu               57 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_FWB;
vcpu               59 arch/arm64/include/asm/kvm_emulate.h 	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu               60 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 &= ~HCR_RW;
vcpu               67 arch/arm64/include/asm/kvm_emulate.h 	if (!vcpu_el1_is_32bit(vcpu))
vcpu               68 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_TID3;
vcpu               71 arch/arm64/include/asm/kvm_emulate.h 	    vcpu_el1_is_32bit(vcpu))
vcpu               72 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.hcr_el2 |= HCR_TID2;
vcpu               75 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
vcpu               77 arch/arm64/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu->arch.hcr_el2;
vcpu               80 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
vcpu               82 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
vcpu               85 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
vcpu               87 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.hcr_el2 |= HCR_TWE;
vcpu               90 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
vcpu               92 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
vcpu               95 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu               97 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
vcpu              100 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
vcpu              102 arch/arm64/include/asm/kvm_emulate.h 	return vcpu->arch.vsesr_el2;
vcpu              105 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
vcpu              107 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.vsesr_el2 = vsesr;
vcpu              110 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
vcpu              112 arch/arm64/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
vcpu              115 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
vcpu              117 arch/arm64/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
vcpu              120 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
vcpu              122 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu->arch.sysregs_loaded_on_cpu)
vcpu              125 arch/arm64/include/asm/kvm_emulate.h 		return *__vcpu_elr_el1(vcpu);
vcpu              128 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
vcpu              130 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu->arch.sysregs_loaded_on_cpu)
vcpu              133 arch/arm64/include/asm/kvm_emulate.h 		*__vcpu_elr_el1(vcpu) = v;
vcpu              136 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
vcpu              138 arch/arm64/include/asm/kvm_emulate.h 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
vcpu              141 arch/arm64/include/asm/kvm_emulate.h static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
vcpu              143 arch/arm64/include/asm/kvm_emulate.h 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
vcpu              146 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
vcpu              148 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu))
vcpu              149 arch/arm64/include/asm/kvm_emulate.h 		return kvm_condition_valid32(vcpu);
vcpu              154 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
vcpu              156 arch/arm64/include/asm/kvm_emulate.h 	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
vcpu              164 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
vcpu              167 arch/arm64/include/asm/kvm_emulate.h 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
vcpu              170 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
vcpu              174 arch/arm64/include/asm/kvm_emulate.h 		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
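An illustrative sketch (not part of the index above) of how an emulation path might use the arm64 GPR accessors vcpu_get_reg()/vcpu_set_reg() listed here. Register number 31 encodes XZR, so reads of it return 0 and writes to it are discarded; emulate_reg_move() is a hypothetical helper used only for this example.

#include <asm/kvm_emulate.h>

/* Copy guest register Xn into Xd while emulating an instruction. */
static void emulate_reg_move(struct kvm_vcpu *vcpu, u8 rd, u8 rn)
{
	/* reg 31 is XZR: vcpu_get_reg() yields 0, vcpu_set_reg() drops the write. */
	vcpu_set_reg(vcpu, rd, vcpu_get_reg(vcpu, rn));
}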
vcpu              177 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
vcpu              179 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu))
vcpu              180 arch/arm64/include/asm/kvm_emulate.h 		return vcpu_read_spsr32(vcpu);
vcpu              182 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu->arch.sysregs_loaded_on_cpu)
vcpu              185 arch/arm64/include/asm/kvm_emulate.h 		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
vcpu              188 arch/arm64/include/asm/kvm_emulate.h static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
vcpu              190 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu)) {
vcpu              191 arch/arm64/include/asm/kvm_emulate.h 		vcpu_write_spsr32(vcpu, v);
vcpu              195 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu->arch.sysregs_loaded_on_cpu)
vcpu              198 arch/arm64/include/asm/kvm_emulate.h 		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
vcpu              233 arch/arm64/include/asm/kvm_emulate.h static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
vcpu              237 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu)) {
vcpu              238 arch/arm64/include/asm/kvm_emulate.h 		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
vcpu              242 arch/arm64/include/asm/kvm_emulate.h 	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
vcpu              247 arch/arm64/include/asm/kvm_emulate.h static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
vcpu              249 arch/arm64/include/asm/kvm_emulate.h 	return vcpu->arch.fault.esr_el2;
vcpu              252 arch/arm64/include/asm/kvm_emulate.h static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
vcpu              254 arch/arm64/include/asm/kvm_emulate.h 	u32 esr = kvm_vcpu_get_hsr(vcpu);
vcpu              262 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
vcpu              264 arch/arm64/include/asm/kvm_emulate.h 	return vcpu->arch.fault.far_el2;
vcpu              267 arch/arm64/include/asm/kvm_emulate.h static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
vcpu              269 arch/arm64/include/asm/kvm_emulate.h 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
vcpu              272 arch/arm64/include/asm/kvm_emulate.h static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
vcpu              274 arch/arm64/include/asm/kvm_emulate.h 	return vcpu->arch.fault.disr_el1;
vcpu              277 arch/arm64/include/asm/kvm_emulate.h static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
vcpu              279 arch/arm64/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
vcpu              282 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
vcpu              284 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
vcpu              287 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
vcpu              289 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
vcpu              292 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
vcpu              294 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
vcpu              297 arch/arm64/include/asm/kvm_emulate.h static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
vcpu              299 arch/arm64/include/asm/kvm_emulate.h 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
vcpu              302 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
vcpu              304 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
vcpu              307 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
vcpu              309 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
vcpu              310 arch/arm64/include/asm/kvm_emulate.h 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
vcpu              313 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
vcpu              315 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
vcpu              318 arch/arm64/include/asm/kvm_emulate.h static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
vcpu              320 arch/arm64/include/asm/kvm_emulate.h 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
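A minimal sketch of how the data-abort syndrome accessors above combine when decoding a trapped access. decoded_dabt and decode_dabt() are hypothetical names invented for this example; the real MMIO path (io_mem_abort() and friends) does considerably more.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/kvm_emulate.h>

struct decoded_dabt {
	bool	is_write;	/* WnR set, or a stage-1 PT walk (AF/DBM) update */
	bool	sign_extend;	/* SSE: sign-extend loads narrower than the register */
	int	rd;		/* SRT: register being transferred */
	int	len;		/* SAS decoded into an access size in bytes */
};

static int decode_dabt(const struct kvm_vcpu *vcpu, struct decoded_dabt *d)
{
	/* Without ISV the remaining syndrome fields carry no usable decode. */
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -ENOENT;

	d->is_write    = kvm_vcpu_dabt_iswrite(vcpu);
	d->sign_extend = kvm_vcpu_dabt_issext(vcpu);
	d->rd          = kvm_vcpu_dabt_get_rd(vcpu);
	d->len         = kvm_vcpu_dabt_get_as(vcpu);
	return 0;
}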
vcpu              324 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
vcpu              326 arch/arm64/include/asm/kvm_emulate.h 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
vcpu              329 arch/arm64/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
vcpu              331 arch/arm64/include/asm/kvm_emulate.h 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
vcpu              334 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
vcpu              336 arch/arm64/include/asm/kvm_emulate.h 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
vcpu              339 arch/arm64/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
vcpu              341 arch/arm64/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
vcpu              344 arch/arm64/include/asm/kvm_emulate.h static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
vcpu              346 arch/arm64/include/asm/kvm_emulate.h 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
vcpu              349 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
vcpu              351 arch/arm64/include/asm/kvm_emulate.h 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
vcpu              368 arch/arm64/include/asm/kvm_emulate.h static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
vcpu              370 arch/arm64/include/asm/kvm_emulate.h 	u32 esr = kvm_vcpu_get_hsr(vcpu);
vcpu              374 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
vcpu              376 arch/arm64/include/asm/kvm_emulate.h 	if (kvm_vcpu_trap_is_iabt(vcpu))
vcpu              379 arch/arm64/include/asm/kvm_emulate.h 	return kvm_vcpu_dabt_iswrite(vcpu);
vcpu              382 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
vcpu              384 arch/arm64/include/asm/kvm_emulate.h 	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
vcpu              387 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
vcpu              389 arch/arm64/include/asm/kvm_emulate.h 	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
vcpu              392 arch/arm64/include/asm/kvm_emulate.h static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
vcpu              396 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
vcpu              398 arch/arm64/include/asm/kvm_emulate.h 		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
vcpu              401 arch/arm64/include/asm/kvm_emulate.h static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
vcpu              403 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu)) {
vcpu              404 arch/arm64/include/asm/kvm_emulate.h 		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
vcpu              406 arch/arm64/include/asm/kvm_emulate.h 		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
vcpu              408 arch/arm64/include/asm/kvm_emulate.h 		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
vcpu              412 arch/arm64/include/asm/kvm_emulate.h static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
vcpu              414 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu))
vcpu              415 arch/arm64/include/asm/kvm_emulate.h 		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
vcpu              417 arch/arm64/include/asm/kvm_emulate.h 	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
vcpu              420 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
vcpu              424 arch/arm64/include/asm/kvm_emulate.h 	if (kvm_vcpu_is_be(vcpu)) {
vcpu              451 arch/arm64/include/asm/kvm_emulate.h static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
vcpu              455 arch/arm64/include/asm/kvm_emulate.h 	if (kvm_vcpu_is_be(vcpu)) {
vcpu              482 arch/arm64/include/asm/kvm_emulate.h static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
vcpu              484 arch/arm64/include/asm/kvm_emulate.h 	if (vcpu_mode_is_32bit(vcpu))
vcpu              485 arch/arm64/include/asm/kvm_emulate.h 		kvm_skip_instr32(vcpu, is_wide_instr);
vcpu              487 arch/arm64/include/asm/kvm_emulate.h 		*vcpu_pc(vcpu) += 4;
vcpu              490 arch/arm64/include/asm/kvm_emulate.h 	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
vcpu              497 arch/arm64/include/asm/kvm_emulate.h static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
vcpu              499 arch/arm64/include/asm/kvm_emulate.h 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
vcpu              500 arch/arm64/include/asm/kvm_emulate.h 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
vcpu              502 arch/arm64/include/asm/kvm_emulate.h 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              504 arch/arm64/include/asm/kvm_emulate.h 	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
vcpu              505 arch/arm64/include/asm/kvm_emulate.h 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
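A sketch of the usual "emulate, then skip" idiom built on kvm_skip_instr(), mirroring the handle_smc()/kvm_handle_wfx() excerpts further down. handle_some_trap() is a made-up handler name used purely for illustration.

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

static int handle_some_trap(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* Hypothetical emulation result: report failure in x0, as handle_smc() does. */
	vcpu_set_reg(vcpu, 0, ~0UL);

	/* ESR_ELx.IL says whether the trapped instruction was 16 or 32 bits wide. */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	/* Returning 1 means "handled, keep running the guest" in this convention. */
	return 1;
}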
vcpu               54 arch/arm64/include/asm/kvm_host.h int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
vcpu               55 arch/arm64/include/asm/kvm_host.h void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
vcpu              344 arch/arm64/include/asm/kvm_host.h #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
vcpu              345 arch/arm64/include/asm/kvm_host.h 				      sve_ffr_offset((vcpu)->arch.sve_max_vl)))
vcpu              347 arch/arm64/include/asm/kvm_host.h #define vcpu_sve_state_size(vcpu) ({					\
vcpu              351 arch/arm64/include/asm/kvm_host.h 	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
vcpu              354 arch/arm64/include/asm/kvm_host.h 		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
vcpu              371 arch/arm64/include/asm/kvm_host.h #define vcpu_has_sve(vcpu) (system_supports_sve() && \
vcpu              372 arch/arm64/include/asm/kvm_host.h 			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
vcpu              374 arch/arm64/include/asm/kvm_host.h #define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
vcpu              376 arch/arm64/include/asm/kvm_host.h 				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
vcpu              388 arch/arm64/include/asm/kvm_host.h u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
vcpu              389 arch/arm64/include/asm/kvm_host.h void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
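A minimal read-modify-write sketch using vcpu_read_sys_reg()/vcpu_write_sys_reg(), which transparently hit either the in-memory copy or the live hardware register depending on sysregs_loaded_on_cpu; this is the same idiom kvm_vcpu_set_be() uses above. set_guest_sctlr_bits() is a hypothetical helper for this example.

#include <asm/kvm_host.h>

static void set_guest_sctlr_bits(struct kvm_vcpu *vcpu, u64 bits)
{
	/* The accessors hide whether SCTLR_EL1 currently lives in memory or hardware. */
	u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

	vcpu_write_sys_reg(vcpu, sctlr | bits, SCTLR_EL1);
}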
vcpu              418 arch/arm64/include/asm/kvm_host.h unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
vcpu              419 arch/arm64/include/asm/kvm_host.h int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
vcpu              420 arch/arm64/include/asm/kvm_host.h int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu              421 arch/arm64/include/asm/kvm_host.h int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu              422 arch/arm64/include/asm/kvm_host.h int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
vcpu              425 arch/arm64/include/asm/kvm_host.h int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
vcpu              475 arch/arm64/include/asm/kvm_host.h int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              477 arch/arm64/include/asm/kvm_host.h void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              483 arch/arm64/include/asm/kvm_host.h void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
vcpu              545 arch/arm64/include/asm/kvm_host.h void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
vcpu              549 arch/arm64/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
vcpu              550 arch/arm64/include/asm/kvm_host.h static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
vcpu              553 arch/arm64/include/asm/kvm_host.h void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
vcpu              554 arch/arm64/include/asm/kvm_host.h void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
vcpu              555 arch/arm64/include/asm/kvm_host.h void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
vcpu              556 arch/arm64/include/asm/kvm_host.h int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
vcpu              558 arch/arm64/include/asm/kvm_host.h int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
vcpu              560 arch/arm64/include/asm/kvm_host.h int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
vcpu              566 arch/arm64/include/asm/kvm_host.h int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
vcpu              567 arch/arm64/include/asm/kvm_host.h void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
vcpu              568 arch/arm64/include/asm/kvm_host.h void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
vcpu              569 arch/arm64/include/asm/kvm_host.h void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
vcpu              577 arch/arm64/include/asm/kvm_host.h static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
vcpu              579 arch/arm64/include/asm/kvm_host.h 	return kvm_arch_vcpu_run_map_fp(vcpu);
vcpu              585 arch/arm64/include/asm/kvm_host.h void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
vcpu              586 arch/arm64/include/asm/kvm_host.h void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
vcpu              665 arch/arm64/include/asm/kvm_host.h void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
vcpu              666 arch/arm64/include/asm/kvm_host.h void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
vcpu              676 arch/arm64/include/asm/kvm_host.h int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
vcpu              677 arch/arm64/include/asm/kvm_host.h bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
vcpu              679 arch/arm64/include/asm/kvm_host.h #define kvm_arm_vcpu_sve_finalized(vcpu) \
vcpu              680 arch/arm64/include/asm/kvm_host.h 	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
vcpu              682 arch/arm64/include/asm/kvm_host.h #define kvm_arm_vcpu_loaded(vcpu)	((vcpu)->arch.sysregs_loaded_on_cpu)
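A small sketch of the guard condition implied by the SVE helpers above: SVE register state exists only for SVE-enabled vCPUs and may only be touched once the vCPU has been finalized, as the get_sve_reg()/set_sve_reg() excerpts below also check. sve_regs_accessible() is a hypothetical name.

#include <asm/kvm_host.h>

static bool sve_regs_accessible(const struct kvm_vcpu *vcpu)
{
	/* Both conditions must hold before sve_state can be read or written. */
	return vcpu_has_sve(vcpu) && kvm_arm_vcpu_sve_finalized(vcpu);
}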
vcpu               50 arch/arm64/include/asm/kvm_hyp.h int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
vcpu               52 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
vcpu               53 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
vcpu               54 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
vcpu               55 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
vcpu               56 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
vcpu               57 arch/arm64/include/asm/kvm_hyp.h void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
vcpu               58 arch/arm64/include/asm/kvm_hyp.h int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
vcpu               60 arch/arm64/include/asm/kvm_hyp.h void __timer_enable_traps(struct kvm_vcpu *vcpu);
vcpu               61 arch/arm64/include/asm/kvm_hyp.h void __timer_disable_traps(struct kvm_vcpu *vcpu);
vcpu               69 arch/arm64/include/asm/kvm_hyp.h void __sysreg32_save_state(struct kvm_vcpu *vcpu);
vcpu               70 arch/arm64/include/asm/kvm_hyp.h void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
vcpu               72 arch/arm64/include/asm/kvm_hyp.h void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
vcpu               73 arch/arm64/include/asm/kvm_hyp.h void __debug_switch_to_host(struct kvm_vcpu *vcpu);
vcpu               78 arch/arm64/include/asm/kvm_hyp.h void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
vcpu               81 arch/arm64/include/asm/kvm_hyp.h u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
vcpu               23 arch/arm64/include/asm/kvm_mmio.h int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu               24 arch/arm64/include/asm/kvm_mmio.h int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              161 arch/arm64/include/asm/kvm_mmu.h int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
vcpu              163 arch/arm64/include/asm/kvm_mmu.h void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
vcpu              305 arch/arm64/include/asm/kvm_mmu.h static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
vcpu              307 arch/arm64/include/asm/kvm_mmu.h 	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
vcpu              367 arch/arm64/include/asm/kvm_mmu.h void kvm_set_way_flush(struct kvm_vcpu *vcpu);
vcpu              368 arch/arm64/include/asm/kvm_mmu.h void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
vcpu               36 arch/arm64/kvm/debug.c static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
vcpu               38 arch/arm64/kvm/debug.c 	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
vcpu               40 arch/arm64/kvm/debug.c 	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;
vcpu               43 arch/arm64/kvm/debug.c 				vcpu->arch.guest_debug_preserved.mdscr_el1);
vcpu               46 arch/arm64/kvm/debug.c static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
vcpu               48 arch/arm64/kvm/debug.c 	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;
vcpu               50 arch/arm64/kvm/debug.c 	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);
vcpu               53 arch/arm64/kvm/debug.c 				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
vcpu               75 arch/arm64/kvm/debug.c void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
vcpu               77 arch/arm64/kvm/debug.c 	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
vcpu              101 arch/arm64/kvm/debug.c void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu              103 arch/arm64/kvm/debug.c 	bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
vcpu              104 arch/arm64/kvm/debug.c 	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
vcpu              106 arch/arm64/kvm/debug.c 	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
vcpu              112 arch/arm64/kvm/debug.c 	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
vcpu              113 arch/arm64/kvm/debug.c 	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
vcpu              120 arch/arm64/kvm/debug.c 	if (vcpu->guest_debug) {
vcpu              122 arch/arm64/kvm/debug.c 		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
vcpu              125 arch/arm64/kvm/debug.c 		save_guest_debug_regs(vcpu);
vcpu              147 arch/arm64/kvm/debug.c 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
vcpu              148 arch/arm64/kvm/debug.c 			*vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
vcpu              149 arch/arm64/kvm/debug.c 			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
vcpu              151 arch/arm64/kvm/debug.c 			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu              153 arch/arm64/kvm/debug.c 			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
vcpu              155 arch/arm64/kvm/debug.c 			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu              158 arch/arm64/kvm/debug.c 		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
vcpu              169 arch/arm64/kvm/debug.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
vcpu              171 arch/arm64/kvm/debug.c 			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
vcpu              173 arch/arm64/kvm/debug.c 			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
vcpu              175 arch/arm64/kvm/debug.c 			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
vcpu              176 arch/arm64/kvm/debug.c 			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu              180 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_bcr[0],
vcpu              181 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_bvr[0]);
vcpu              184 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_wcr[0],
vcpu              185 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_wvr[0]);
vcpu              189 arch/arm64/kvm/debug.c 	BUG_ON(!vcpu->guest_debug &&
vcpu              190 arch/arm64/kvm/debug.c 		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
vcpu              194 arch/arm64/kvm/debug.c 		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
vcpu              197 arch/arm64/kvm/debug.c 	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
vcpu              198 arch/arm64/kvm/debug.c 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu              201 arch/arm64/kvm/debug.c 	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
vcpu              202 arch/arm64/kvm/debug.c 		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
vcpu              204 arch/arm64/kvm/debug.c 	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
vcpu              205 arch/arm64/kvm/debug.c 	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
vcpu              208 arch/arm64/kvm/debug.c void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
vcpu              210 arch/arm64/kvm/debug.c 	trace_kvm_arm_clear_debug(vcpu->guest_debug);
vcpu              212 arch/arm64/kvm/debug.c 	if (vcpu->guest_debug) {
vcpu              213 arch/arm64/kvm/debug.c 		restore_guest_debug_regs(vcpu);
vcpu              219 arch/arm64/kvm/debug.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
vcpu              220 arch/arm64/kvm/debug.c 			kvm_arm_reset_debug_ptr(vcpu);
vcpu              223 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_bcr[0],
vcpu              224 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_bvr[0]);
vcpu              227 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_wcr[0],
vcpu              228 arch/arm64/kvm/debug.c 						&vcpu->arch.debug_ptr->dbg_wvr[0]);
vcpu               27 arch/arm64/kvm/fpsimd.c int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
vcpu               46 arch/arm64/kvm/fpsimd.c 	vcpu->arch.host_thread_info = kern_hyp_va(ti);
vcpu               47 arch/arm64/kvm/fpsimd.c 	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
vcpu               62 arch/arm64/kvm/fpsimd.c void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
vcpu               66 arch/arm64/kvm/fpsimd.c 	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
vcpu               69 arch/arm64/kvm/fpsimd.c 	vcpu->arch.flags |= KVM_ARM64_FP_HOST;
vcpu               72 arch/arm64/kvm/fpsimd.c 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
vcpu               75 arch/arm64/kvm/fpsimd.c 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
vcpu               84 arch/arm64/kvm/fpsimd.c void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
vcpu               88 arch/arm64/kvm/fpsimd.c 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
vcpu               89 arch/arm64/kvm/fpsimd.c 		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
vcpu               90 arch/arm64/kvm/fpsimd.c 					 vcpu->arch.sve_state,
vcpu               91 arch/arm64/kvm/fpsimd.c 					 vcpu->arch.sve_max_vl);
vcpu               94 arch/arm64/kvm/fpsimd.c 		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
vcpu              104 arch/arm64/kvm/fpsimd.c void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
vcpu              108 arch/arm64/kvm/fpsimd.c 	bool guest_has_sve = vcpu_has_sve(vcpu);
vcpu              112 arch/arm64/kvm/fpsimd.c 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
vcpu              113 arch/arm64/kvm/fpsimd.c 		u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
vcpu              127 arch/arm64/kvm/fpsimd.c 		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
vcpu              134 arch/arm64/kvm/fpsimd.c 			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
vcpu               46 arch/arm64/kvm/guest.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu               62 arch/arm64/kvm/guest.c static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
vcpu              101 arch/arm64/kvm/guest.c 	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
vcpu              107 arch/arm64/kvm/guest.c static int validate_core_offset(const struct kvm_vcpu *vcpu,
vcpu              111 arch/arm64/kvm/guest.c 	int size = core_reg_size_from_offset(vcpu, off);
vcpu              122 arch/arm64/kvm/guest.c static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              131 arch/arm64/kvm/guest.c 	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
vcpu              141 arch/arm64/kvm/guest.c 	if (validate_core_offset(vcpu, reg))
vcpu              150 arch/arm64/kvm/guest.c static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              153 arch/arm64/kvm/guest.c 	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
vcpu              166 arch/arm64/kvm/guest.c 	if (validate_core_offset(vcpu, reg))
vcpu              189 arch/arm64/kvm/guest.c 			if (!vcpu_el1_is_32bit(vcpu))
vcpu              195 arch/arm64/kvm/guest.c 			if (vcpu_el1_is_32bit(vcpu))
vcpu              206 arch/arm64/kvm/guest.c 	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
vcpu              210 arch/arm64/kvm/guest.c 			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
vcpu              220 arch/arm64/kvm/guest.c static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              225 arch/arm64/kvm/guest.c 	if (!vcpu_has_sve(vcpu))
vcpu              228 arch/arm64/kvm/guest.c 	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
vcpu              233 arch/arm64/kvm/guest.c 	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
vcpu              244 arch/arm64/kvm/guest.c static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              249 arch/arm64/kvm/guest.c 	if (!vcpu_has_sve(vcpu))
vcpu              252 arch/arm64/kvm/guest.c 	if (kvm_arm_vcpu_sve_finalized(vcpu))
vcpu              255 arch/arm64/kvm/guest.c 	if (WARN_ON(vcpu->arch.sve_state))
vcpu              285 arch/arm64/kvm/guest.c 	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
vcpu              312 arch/arm64/kvm/guest.c #define vcpu_sve_slices(vcpu) 1
vcpu              326 arch/arm64/kvm/guest.c 			     struct kvm_vcpu *vcpu,
vcpu              358 arch/arm64/kvm/guest.c 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
vcpu              361 arch/arm64/kvm/guest.c 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
vcpu              368 arch/arm64/kvm/guest.c 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
vcpu              371 arch/arm64/kvm/guest.c 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
vcpu              381 arch/arm64/kvm/guest.c 	sve_state_size = vcpu_sve_state_size(vcpu);
vcpu              392 arch/arm64/kvm/guest.c static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              400 arch/arm64/kvm/guest.c 		return get_sve_vls(vcpu, reg);
vcpu              403 arch/arm64/kvm/guest.c 	ret = sve_reg_to_region(&region, vcpu, reg);
vcpu              407 arch/arm64/kvm/guest.c 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
vcpu              410 arch/arm64/kvm/guest.c 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
vcpu              418 arch/arm64/kvm/guest.c static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              426 arch/arm64/kvm/guest.c 		return set_sve_vls(vcpu, reg);
vcpu              429 arch/arm64/kvm/guest.c 	ret = sve_reg_to_region(&region, vcpu, reg);
vcpu              433 arch/arm64/kvm/guest.c 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
vcpu              436 arch/arm64/kvm/guest.c 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
vcpu              443 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              448 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              453 arch/arm64/kvm/guest.c static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
vcpu              461 arch/arm64/kvm/guest.c 		int size = core_reg_size_from_offset(vcpu, i);
vcpu              496 arch/arm64/kvm/guest.c static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
vcpu              498 arch/arm64/kvm/guest.c 	return copy_core_reg_indices(vcpu, NULL);
vcpu              518 arch/arm64/kvm/guest.c static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu              532 arch/arm64/kvm/guest.c static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              542 arch/arm64/kvm/guest.c 	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
vcpu              545 arch/arm64/kvm/guest.c static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              550 arch/arm64/kvm/guest.c 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
vcpu              554 arch/arm64/kvm/guest.c static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
vcpu              556 arch/arm64/kvm/guest.c 	const unsigned int slices = vcpu_sve_slices(vcpu);
vcpu              558 arch/arm64/kvm/guest.c 	if (!vcpu_has_sve(vcpu))
vcpu              562 arch/arm64/kvm/guest.c 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
vcpu              568 arch/arm64/kvm/guest.c static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
vcpu              571 arch/arm64/kvm/guest.c 	const unsigned int slices = vcpu_sve_slices(vcpu);
vcpu              576 arch/arm64/kvm/guest.c 	if (!vcpu_has_sve(vcpu))
vcpu              580 arch/arm64/kvm/guest.c 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
vcpu              620 arch/arm64/kvm/guest.c unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
vcpu              624 arch/arm64/kvm/guest.c 	res += num_core_regs(vcpu);
vcpu              625 arch/arm64/kvm/guest.c 	res += num_sve_regs(vcpu);
vcpu              626 arch/arm64/kvm/guest.c 	res += kvm_arm_num_sys_reg_descs(vcpu);
vcpu              627 arch/arm64/kvm/guest.c 	res += kvm_arm_get_fw_num_regs(vcpu);
vcpu              638 arch/arm64/kvm/guest.c int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu              642 arch/arm64/kvm/guest.c 	ret = copy_core_reg_indices(vcpu, uindices);
vcpu              647 arch/arm64/kvm/guest.c 	ret = copy_sve_reg_indices(vcpu, uindices);
vcpu              652 arch/arm64/kvm/guest.c 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
vcpu              655 arch/arm64/kvm/guest.c 	uindices += kvm_arm_get_fw_num_regs(vcpu);
vcpu              657 arch/arm64/kvm/guest.c 	ret = copy_timer_indices(vcpu, uindices);
vcpu              662 arch/arm64/kvm/guest.c 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
vcpu              665 arch/arm64/kvm/guest.c int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              672 arch/arm64/kvm/guest.c 	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
vcpu              673 arch/arm64/kvm/guest.c 	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
vcpu              674 arch/arm64/kvm/guest.c 	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
vcpu              678 arch/arm64/kvm/guest.c 		return get_timer_reg(vcpu, reg);
vcpu              680 arch/arm64/kvm/guest.c 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
vcpu              683 arch/arm64/kvm/guest.c int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              690 arch/arm64/kvm/guest.c 	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
vcpu              691 arch/arm64/kvm/guest.c 	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
vcpu              692 arch/arm64/kvm/guest.c 	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
vcpu              696 arch/arm64/kvm/guest.c 		return set_timer_reg(vcpu, reg);
vcpu              698 arch/arm64/kvm/guest.c 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
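For context, a userspace-side sketch of the ONE_REG interface that kvm_arm_get_reg()/kvm_arm_set_reg() implement. struct kvm_one_reg and KVM_SET_ONE_REG are the documented KVM UAPI; the helper name and the assumption of an already-created vCPU fd are specific to this example, and valid register ids come from KVM_GET_REG_LIST.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Write one guest register through the vCPU file descriptor. */
static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uint64_t)&val,		/* kernel copies the value from here */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}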
vcpu              701 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu              707 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu              713 arch/arm64/kvm/guest.c int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
vcpu              716 arch/arm64/kvm/guest.c 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
vcpu              720 arch/arm64/kvm/guest.c 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
vcpu              725 arch/arm64/kvm/guest.c int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
vcpu              736 arch/arm64/kvm/guest.c 			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
vcpu              740 arch/arm64/kvm/guest.c 		kvm_inject_vabt(vcpu);
vcpu              796 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              801 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              806 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu              827 arch/arm64/kvm/guest.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu              832 arch/arm64/kvm/guest.c 	trace_kvm_set_guest_debug(vcpu, dbg->control);
vcpu              840 arch/arm64/kvm/guest.c 		vcpu->guest_debug = dbg->control;
vcpu              843 arch/arm64/kvm/guest.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
vcpu              844 arch/arm64/kvm/guest.c 			vcpu->arch.external_debug_state = dbg->arch;
vcpu              849 arch/arm64/kvm/guest.c 		vcpu->guest_debug = 0;
vcpu              856 arch/arm64/kvm/guest.c int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
vcpu              863 arch/arm64/kvm/guest.c 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
vcpu              866 arch/arm64/kvm/guest.c 		ret = kvm_arm_timer_set_attr(vcpu, attr);
vcpu              876 arch/arm64/kvm/guest.c int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
vcpu              883 arch/arm64/kvm/guest.c 		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
vcpu              886 arch/arm64/kvm/guest.c 		ret = kvm_arm_timer_get_attr(vcpu, attr);
vcpu              896 arch/arm64/kvm/guest.c int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
vcpu              903 arch/arm64/kvm/guest.c 		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
vcpu              906 arch/arm64/kvm/guest.c 		ret = kvm_arm_timer_has_attr(vcpu, attr);
vcpu               30 arch/arm64/kvm/handle_exit.c static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
vcpu               33 arch/arm64/kvm/handle_exit.c 		kvm_inject_vabt(vcpu);
vcpu               36 arch/arm64/kvm/handle_exit.c static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               40 arch/arm64/kvm/handle_exit.c 	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
vcpu               41 arch/arm64/kvm/handle_exit.c 			    kvm_vcpu_hvc_get_imm(vcpu));
vcpu               42 arch/arm64/kvm/handle_exit.c 	vcpu->stat.hvc_exit_stat++;
vcpu               44 arch/arm64/kvm/handle_exit.c 	ret = kvm_hvc_call_handler(vcpu);
vcpu               46 arch/arm64/kvm/handle_exit.c 		vcpu_set_reg(vcpu, 0, ~0UL);
vcpu               53 arch/arm64/kvm/handle_exit.c static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               63 arch/arm64/kvm/handle_exit.c 	vcpu_set_reg(vcpu, 0, ~0UL);
vcpu               64 arch/arm64/kvm/handle_exit.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu               72 arch/arm64/kvm/handle_exit.c static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               74 arch/arm64/kvm/handle_exit.c 	kvm_inject_undefined(vcpu);
vcpu               90 arch/arm64/kvm/handle_exit.c static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               92 arch/arm64/kvm/handle_exit.c 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
vcpu               93 arch/arm64/kvm/handle_exit.c 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
vcpu               94 arch/arm64/kvm/handle_exit.c 		vcpu->stat.wfe_exit_stat++;
vcpu               95 arch/arm64/kvm/handle_exit.c 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
vcpu               97 arch/arm64/kvm/handle_exit.c 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
vcpu               98 arch/arm64/kvm/handle_exit.c 		vcpu->stat.wfi_exit_stat++;
vcpu               99 arch/arm64/kvm/handle_exit.c 		kvm_vcpu_block(vcpu);
vcpu              100 arch/arm64/kvm/handle_exit.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              103 arch/arm64/kvm/handle_exit.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              120 arch/arm64/kvm/handle_exit.c static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              122 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu              130 arch/arm64/kvm/handle_exit.c 		run->debug.arch.far = vcpu->arch.fault.far_el2;
vcpu              147 arch/arm64/kvm/handle_exit.c static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              149 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu              154 arch/arm64/kvm/handle_exit.c 	kvm_inject_undefined(vcpu);
vcpu              158 arch/arm64/kvm/handle_exit.c static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              161 arch/arm64/kvm/handle_exit.c 	kvm_inject_undefined(vcpu);
vcpu              169 arch/arm64/kvm/handle_exit.c void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
vcpu              171 arch/arm64/kvm/handle_exit.c 	if (vcpu_has_ptrauth(vcpu))
vcpu              172 arch/arm64/kvm/handle_exit.c 		vcpu_ptrauth_enable(vcpu);
vcpu              174 arch/arm64/kvm/handle_exit.c 		kvm_inject_undefined(vcpu);
vcpu              181 arch/arm64/kvm/handle_exit.c static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              183 arch/arm64/kvm/handle_exit.c 	kvm_arm_vcpu_ptrauth_trap(vcpu);
vcpu              212 arch/arm64/kvm/handle_exit.c static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
vcpu              214 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu              226 arch/arm64/kvm/handle_exit.c static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              234 arch/arm64/kvm/handle_exit.c 	if (!kvm_condition_valid(vcpu)) {
vcpu              235 arch/arm64/kvm/handle_exit.c 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              240 arch/arm64/kvm/handle_exit.c 		exit_handler = kvm_get_exit_handler(vcpu);
vcpu              241 arch/arm64/kvm/handle_exit.c 		handled = exit_handler(vcpu, run);
vcpu              251 arch/arm64/kvm/handle_exit.c int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              255 arch/arm64/kvm/handle_exit.c 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
vcpu              264 arch/arm64/kvm/handle_exit.c 			u32 adj =  kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
vcpu              265 arch/arm64/kvm/handle_exit.c 			*vcpu_pc(vcpu) -= adj;
vcpu              279 arch/arm64/kvm/handle_exit.c 		return handle_trap_exceptions(vcpu, run);
vcpu              303 arch/arm64/kvm/handle_exit.c void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              308 arch/arm64/kvm/handle_exit.c 			u64 disr = kvm_vcpu_get_disr(vcpu);
vcpu              310 arch/arm64/kvm/handle_exit.c 			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
vcpu              312 arch/arm64/kvm/handle_exit.c 			kvm_inject_vabt(vcpu);
vcpu              321 arch/arm64/kvm/handle_exit.c 		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
vcpu              132 arch/arm64/kvm/hyp/debug-sr.c static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
vcpu              151 arch/arm64/kvm/hyp/debug-sr.c static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
vcpu              171 arch/arm64/kvm/hyp/debug-sr.c void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
vcpu              183 arch/arm64/kvm/hyp/debug-sr.c 		__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);
vcpu              185 arch/arm64/kvm/hyp/debug-sr.c 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
vcpu              188 arch/arm64/kvm/hyp/debug-sr.c 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu              189 arch/arm64/kvm/hyp/debug-sr.c 	guest_ctxt = &vcpu->arch.ctxt;
vcpu              190 arch/arm64/kvm/hyp/debug-sr.c 	host_dbg = &vcpu->arch.host_debug_state.regs;
vcpu              191 arch/arm64/kvm/hyp/debug-sr.c 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
vcpu              193 arch/arm64/kvm/hyp/debug-sr.c 	__debug_save_state(vcpu, host_dbg, host_ctxt);
vcpu              194 arch/arm64/kvm/hyp/debug-sr.c 	__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
vcpu              197 arch/arm64/kvm/hyp/debug-sr.c void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
vcpu              205 arch/arm64/kvm/hyp/debug-sr.c 		__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);
vcpu              207 arch/arm64/kvm/hyp/debug-sr.c 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
vcpu              210 arch/arm64/kvm/hyp/debug-sr.c 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu              211 arch/arm64/kvm/hyp/debug-sr.c 	guest_ctxt = &vcpu->arch.ctxt;
vcpu              212 arch/arm64/kvm/hyp/debug-sr.c 	host_dbg = &vcpu->arch.host_debug_state.regs;
vcpu              213 arch/arm64/kvm/hyp/debug-sr.c 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
vcpu              215 arch/arm64/kvm/hyp/debug-sr.c 	__debug_save_state(vcpu, guest_dbg, guest_ctxt);
vcpu              216 arch/arm64/kvm/hyp/debug-sr.c 	__debug_restore_state(vcpu, host_dbg, host_ctxt);
vcpu              218 arch/arm64/kvm/hyp/debug-sr.c 	vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
vcpu               29 arch/arm64/kvm/hyp/switch.c static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
vcpu               39 arch/arm64/kvm/hyp/switch.c 	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu               40 arch/arm64/kvm/hyp/switch.c 		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
vcpu               43 arch/arm64/kvm/hyp/switch.c 	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
vcpu               47 arch/arm64/kvm/hyp/switch.c static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
vcpu               49 arch/arm64/kvm/hyp/switch.c 	if (!vcpu_el1_is_32bit(vcpu))
vcpu               52 arch/arm64/kvm/hyp/switch.c 	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
vcpu               55 arch/arm64/kvm/hyp/switch.c static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
vcpu               66 arch/arm64/kvm/hyp/switch.c 	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
vcpu               72 arch/arm64/kvm/hyp/switch.c static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu               85 arch/arm64/kvm/hyp/switch.c 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
vcpu               94 arch/arm64/kvm/hyp/switch.c static void activate_traps_vhe(struct kvm_vcpu *vcpu)
vcpu              101 arch/arm64/kvm/hyp/switch.c 	if (update_fp_enabled(vcpu)) {
vcpu              102 arch/arm64/kvm/hyp/switch.c 		if (vcpu_has_sve(vcpu))
vcpu              106 arch/arm64/kvm/hyp/switch.c 		__activate_traps_fpsimd32(vcpu);
vcpu              115 arch/arm64/kvm/hyp/switch.c static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
vcpu              119 arch/arm64/kvm/hyp/switch.c 	__activate_traps_common(vcpu);
vcpu              123 arch/arm64/kvm/hyp/switch.c 	if (!update_fp_enabled(vcpu)) {
vcpu              125 arch/arm64/kvm/hyp/switch.c 		__activate_traps_fpsimd32(vcpu);
vcpu              131 arch/arm64/kvm/hyp/switch.c static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
vcpu              133 arch/arm64/kvm/hyp/switch.c 	u64 hcr = vcpu->arch.hcr_el2;
vcpu              141 arch/arm64/kvm/hyp/switch.c 		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
vcpu              144 arch/arm64/kvm/hyp/switch.c 		activate_traps_vhe(vcpu);
vcpu              146 arch/arm64/kvm/hyp/switch.c 		__activate_traps_nvhe(vcpu);
vcpu              180 arch/arm64/kvm/hyp/switch.c static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
vcpu              188 arch/arm64/kvm/hyp/switch.c 	if (vcpu->arch.hcr_el2 & HCR_VSE) {
vcpu              189 arch/arm64/kvm/hyp/switch.c 		vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu              190 arch/arm64/kvm/hyp/switch.c 		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
vcpu              199 arch/arm64/kvm/hyp/switch.c void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
vcpu              201 arch/arm64/kvm/hyp/switch.c 	__activate_traps_common(vcpu);
vcpu              222 arch/arm64/kvm/hyp/switch.c static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
vcpu              228 arch/arm64/kvm/hyp/switch.c static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
vcpu              231 arch/arm64/kvm/hyp/switch.c 		__vgic_v3_save_state(vcpu);
vcpu              232 arch/arm64/kvm/hyp/switch.c 		__vgic_v3_deactivate_traps(vcpu);
vcpu              237 arch/arm64/kvm/hyp/switch.c static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
vcpu              240 arch/arm64/kvm/hyp/switch.c 		__vgic_v3_activate_traps(vcpu);
vcpu              241 arch/arm64/kvm/hyp/switch.c 		__vgic_v3_restore_state(vcpu);
vcpu              274 arch/arm64/kvm/hyp/switch.c static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
vcpu              280 arch/arm64/kvm/hyp/switch.c 	esr = vcpu->arch.fault.esr_el2;
vcpu              308 arch/arm64/kvm/hyp/switch.c 	vcpu->arch.fault.far_el2 = far;
vcpu              309 arch/arm64/kvm/hyp/switch.c 	vcpu->arch.fault.hpfar_el2 = hpfar;
vcpu              314 arch/arm64/kvm/hyp/switch.c static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
vcpu              323 arch/arm64/kvm/hyp/switch.c 		sve_guest = vcpu_has_sve(vcpu);
vcpu              324 arch/arm64/kvm/hyp/switch.c 		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
vcpu              332 arch/arm64/kvm/hyp/switch.c 	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
vcpu              358 arch/arm64/kvm/hyp/switch.c 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
vcpu              365 arch/arm64/kvm/hyp/switch.c 				vcpu->arch.host_fpsimd_state,
vcpu              369 arch/arm64/kvm/hyp/switch.c 				       &vcpu->arch.host_fpsimd_state->fpsr);
vcpu              371 arch/arm64/kvm/hyp/switch.c 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
vcpu              374 arch/arm64/kvm/hyp/switch.c 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
vcpu              378 arch/arm64/kvm/hyp/switch.c 		sve_load_state(vcpu_sve_pffr(vcpu),
vcpu              379 arch/arm64/kvm/hyp/switch.c 			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
vcpu              380 arch/arm64/kvm/hyp/switch.c 			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
vcpu              381 arch/arm64/kvm/hyp/switch.c 		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
vcpu              383 arch/arm64/kvm/hyp/switch.c 		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
vcpu              388 arch/arm64/kvm/hyp/switch.c 		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
vcpu              391 arch/arm64/kvm/hyp/switch.c 	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
vcpu              396 arch/arm64/kvm/hyp/switch.c static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
vcpu              398 arch/arm64/kvm/hyp/switch.c 	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
vcpu              399 arch/arm64/kvm/hyp/switch.c 	int rt = kvm_vcpu_sys_get_rt(vcpu);
vcpu              400 arch/arm64/kvm/hyp/switch.c 	u64 val = vcpu_get_reg(vcpu, rt);
vcpu              406 arch/arm64/kvm/hyp/switch.c 	if (vcpu->arch.hcr_el2 & HCR_TVM)
vcpu              447 arch/arm64/kvm/hyp/switch.c 	__kvm_skip_instr(vcpu);
vcpu              456 arch/arm64/kvm/hyp/switch.c static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
vcpu              459 arch/arm64/kvm/hyp/switch.c 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
vcpu              471 arch/arm64/kvm/hyp/switch.c 	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
vcpu              472 arch/arm64/kvm/hyp/switch.c 	    handle_tx2_tvm(vcpu))
vcpu              482 arch/arm64/kvm/hyp/switch.c 	if (__hyp_handle_fpsimd(vcpu))
vcpu              485 arch/arm64/kvm/hyp/switch.c 	if (!__populate_fault_info(vcpu))
vcpu              491 arch/arm64/kvm/hyp/switch.c 		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
vcpu              492 arch/arm64/kvm/hyp/switch.c 			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
vcpu              493 arch/arm64/kvm/hyp/switch.c 			kvm_vcpu_dabt_isvalid(vcpu) &&
vcpu              494 arch/arm64/kvm/hyp/switch.c 			!kvm_vcpu_dabt_isextabt(vcpu) &&
vcpu              495 arch/arm64/kvm/hyp/switch.c 			!kvm_vcpu_dabt_iss1tw(vcpu);
vcpu              498 arch/arm64/kvm/hyp/switch.c 			int ret = __vgic_v2_perform_cpuif_access(vcpu);
vcpu              512 arch/arm64/kvm/hyp/switch.c 	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
vcpu              513 arch/arm64/kvm/hyp/switch.c 	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
vcpu              514 arch/arm64/kvm/hyp/switch.c 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
vcpu              525 arch/arm64/kvm/hyp/switch.c static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
vcpu              530 arch/arm64/kvm/hyp/switch.c 	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
vcpu              533 arch/arm64/kvm/hyp/switch.c static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
vcpu              540 arch/arm64/kvm/hyp/switch.c 	if (__needs_ssbd_off(vcpu) &&
vcpu              546 arch/arm64/kvm/hyp/switch.c static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
vcpu              552 arch/arm64/kvm/hyp/switch.c 	if (__needs_ssbd_off(vcpu) &&
vcpu              597 arch/arm64/kvm/hyp/switch.c int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
vcpu              603 arch/arm64/kvm/hyp/switch.c 	host_ctxt = vcpu->arch.host_cpu_context;
vcpu              604 arch/arm64/kvm/hyp/switch.c 	host_ctxt->__hyp_running_vcpu = vcpu;
vcpu              605 arch/arm64/kvm/hyp/switch.c 	guest_ctxt = &vcpu->arch.ctxt;
vcpu              620 arch/arm64/kvm/hyp/switch.c 	__activate_vm(vcpu->kvm);
vcpu              621 arch/arm64/kvm/hyp/switch.c 	__activate_traps(vcpu);
vcpu              624 arch/arm64/kvm/hyp/switch.c 	__debug_switch_to_guest(vcpu);
vcpu              626 arch/arm64/kvm/hyp/switch.c 	__set_guest_arch_workaround_state(vcpu);
vcpu              630 arch/arm64/kvm/hyp/switch.c 		exit_code = __guest_enter(vcpu, host_ctxt);
vcpu              633 arch/arm64/kvm/hyp/switch.c 	} while (fixup_guest_exit(vcpu, &exit_code));
vcpu              635 arch/arm64/kvm/hyp/switch.c 	__set_host_arch_workaround_state(vcpu);
vcpu              639 arch/arm64/kvm/hyp/switch.c 	__deactivate_traps(vcpu);
vcpu              643 arch/arm64/kvm/hyp/switch.c 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
vcpu              644 arch/arm64/kvm/hyp/switch.c 		__fpsimd_save_fpexc32(vcpu);
vcpu              646 arch/arm64/kvm/hyp/switch.c 	__debug_switch_to_host(vcpu);
vcpu              653 arch/arm64/kvm/hyp/switch.c int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu              671 arch/arm64/kvm/hyp/switch.c 	vcpu = kern_hyp_va(vcpu);
vcpu              673 arch/arm64/kvm/hyp/switch.c 	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
vcpu              674 arch/arm64/kvm/hyp/switch.c 	host_ctxt->__hyp_running_vcpu = vcpu;
vcpu              675 arch/arm64/kvm/hyp/switch.c 	guest_ctxt = &vcpu->arch.ctxt;
vcpu              681 arch/arm64/kvm/hyp/switch.c 	__activate_vm(kern_hyp_va(vcpu->kvm));
vcpu              682 arch/arm64/kvm/hyp/switch.c 	__activate_traps(vcpu);
vcpu              684 arch/arm64/kvm/hyp/switch.c 	__hyp_vgic_restore_state(vcpu);
vcpu              685 arch/arm64/kvm/hyp/switch.c 	__timer_enable_traps(vcpu);
vcpu              691 arch/arm64/kvm/hyp/switch.c 	__sysreg32_restore_state(vcpu);
vcpu              693 arch/arm64/kvm/hyp/switch.c 	__debug_switch_to_guest(vcpu);
vcpu              695 arch/arm64/kvm/hyp/switch.c 	__set_guest_arch_workaround_state(vcpu);
vcpu              699 arch/arm64/kvm/hyp/switch.c 		exit_code = __guest_enter(vcpu, host_ctxt);
vcpu              702 arch/arm64/kvm/hyp/switch.c 	} while (fixup_guest_exit(vcpu, &exit_code));
vcpu              704 arch/arm64/kvm/hyp/switch.c 	__set_host_arch_workaround_state(vcpu);
vcpu              707 arch/arm64/kvm/hyp/switch.c 	__sysreg32_save_state(vcpu);
vcpu              708 arch/arm64/kvm/hyp/switch.c 	__timer_disable_traps(vcpu);
vcpu              709 arch/arm64/kvm/hyp/switch.c 	__hyp_vgic_save_state(vcpu);
vcpu              711 arch/arm64/kvm/hyp/switch.c 	__deactivate_traps(vcpu);
vcpu              712 arch/arm64/kvm/hyp/switch.c 	__deactivate_vm(vcpu);
vcpu              716 arch/arm64/kvm/hyp/switch.c 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
vcpu              717 arch/arm64/kvm/hyp/switch.c 		__fpsimd_save_fpexc32(vcpu);
vcpu              723 arch/arm64/kvm/hyp/switch.c 	__debug_switch_to_host(vcpu);
vcpu              740 arch/arm64/kvm/hyp/switch.c 	struct kvm_vcpu *vcpu;
vcpu              743 arch/arm64/kvm/hyp/switch.c 	vcpu = __host_ctxt->__hyp_running_vcpu;
vcpu              746 arch/arm64/kvm/hyp/switch.c 		__timer_disable_traps(vcpu);
vcpu              747 arch/arm64/kvm/hyp/switch.c 		__deactivate_traps(vcpu);
vcpu              748 arch/arm64/kvm/hyp/switch.c 		__deactivate_vm(vcpu);
vcpu              762 arch/arm64/kvm/hyp/switch.c 		       read_sysreg(hpfar_el2), par, vcpu);
vcpu              768 arch/arm64/kvm/hyp/switch.c 	struct kvm_vcpu *vcpu;
vcpu              769 arch/arm64/kvm/hyp/switch.c 	vcpu = host_ctxt->__hyp_running_vcpu;
vcpu              771 arch/arm64/kvm/hyp/switch.c 	__deactivate_traps(vcpu);
vcpu              777 arch/arm64/kvm/hyp/switch.c 	      read_sysreg(hpfar_el2), par, vcpu);
vcpu              191 arch/arm64/kvm/hyp/sysreg-sr.c void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
vcpu              195 arch/arm64/kvm/hyp/sysreg-sr.c 	if (!vcpu_el1_is_32bit(vcpu))
vcpu              198 arch/arm64/kvm/hyp/sysreg-sr.c 	spsr = vcpu->arch.ctxt.gp_regs.spsr;
vcpu              199 arch/arm64/kvm/hyp/sysreg-sr.c 	sysreg = vcpu->arch.ctxt.sys_regs;
vcpu              209 arch/arm64/kvm/hyp/sysreg-sr.c 	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
vcpu              213 arch/arm64/kvm/hyp/sysreg-sr.c void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
vcpu              217 arch/arm64/kvm/hyp/sysreg-sr.c 	if (!vcpu_el1_is_32bit(vcpu))
vcpu              220 arch/arm64/kvm/hyp/sysreg-sr.c 	spsr = vcpu->arch.ctxt.gp_regs.spsr;
vcpu              221 arch/arm64/kvm/hyp/sysreg-sr.c 	sysreg = vcpu->arch.ctxt.sys_regs;
vcpu              231 arch/arm64/kvm/hyp/sysreg-sr.c 	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
vcpu              246 arch/arm64/kvm/hyp/sysreg-sr.c void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
vcpu              248 arch/arm64/kvm/hyp/sysreg-sr.c 	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
vcpu              249 arch/arm64/kvm/hyp/sysreg-sr.c 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
vcpu              262 arch/arm64/kvm/hyp/sysreg-sr.c 	__sysreg32_restore_state(vcpu);
vcpu              266 arch/arm64/kvm/hyp/sysreg-sr.c 	vcpu->arch.sysregs_loaded_on_cpu = true;
vcpu              268 arch/arm64/kvm/hyp/sysreg-sr.c 	activate_traps_vhe_load(vcpu);
vcpu              282 arch/arm64/kvm/hyp/sysreg-sr.c void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
vcpu              284 arch/arm64/kvm/hyp/sysreg-sr.c 	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
vcpu              285 arch/arm64/kvm/hyp/sysreg-sr.c 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
vcpu              294 arch/arm64/kvm/hyp/sysreg-sr.c 	__sysreg32_save_state(vcpu);
vcpu              299 arch/arm64/kvm/hyp/sysreg-sr.c 	vcpu->arch.sysregs_loaded_on_cpu = false;
vcpu              185 arch/arm64/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
vcpu              187 arch/arm64/kvm/hyp/tlb.c 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
vcpu               16 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
vcpu               18 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	if (vcpu_mode_is_32bit(vcpu))
vcpu               35 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
vcpu               37 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
vcpu               44 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
vcpu               45 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
vcpu               53 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
vcpu               54 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		__kvm_skip_instr(vcpu);
vcpu               60 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		__kvm_skip_instr(vcpu);
vcpu               64 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	rd = kvm_vcpu_dabt_get_rd(vcpu);
vcpu               68 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	if (kvm_vcpu_dabt_iswrite(vcpu)) {
vcpu               69 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		u32 data = vcpu_get_reg(vcpu, rd);
vcpu               70 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		if (__is_be(vcpu)) {
vcpu               77 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		if (__is_be(vcpu)) {
vcpu               81 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 		vcpu_set_reg(vcpu, rd, data);
vcpu               84 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	__kvm_skip_instr(vcpu);
vcpu               29 arch/arm64/kvm/inject_fault.c static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
vcpu               33 arch/arm64/kvm/inject_fault.c 	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
vcpu               47 arch/arm64/kvm/inject_fault.c 	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
vcpu               62 arch/arm64/kvm/inject_fault.c static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
vcpu               64 arch/arm64/kvm/inject_fault.c 	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
vcpu               67 arch/arm64/kvm/inject_fault.c 	old = *vcpu_cpsr(vcpu);
vcpu              113 arch/arm64/kvm/inject_fault.c static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
vcpu              115 arch/arm64/kvm/inject_fault.c 	unsigned long cpsr = *vcpu_cpsr(vcpu);
vcpu              116 arch/arm64/kvm/inject_fault.c 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
vcpu              119 arch/arm64/kvm/inject_fault.c 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
vcpu              120 arch/arm64/kvm/inject_fault.c 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
vcpu              122 arch/arm64/kvm/inject_fault.c 	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu              123 arch/arm64/kvm/inject_fault.c 	vcpu_write_spsr(vcpu, cpsr);
vcpu              125 arch/arm64/kvm/inject_fault.c 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
vcpu              131 arch/arm64/kvm/inject_fault.c 	if (kvm_vcpu_trap_il_is32bit(vcpu))
vcpu              146 arch/arm64/kvm/inject_fault.c 	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
vcpu              149 arch/arm64/kvm/inject_fault.c static void inject_undef64(struct kvm_vcpu *vcpu)
vcpu              151 arch/arm64/kvm/inject_fault.c 	unsigned long cpsr = *vcpu_cpsr(vcpu);
vcpu              154 arch/arm64/kvm/inject_fault.c 	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
vcpu              155 arch/arm64/kvm/inject_fault.c 	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
vcpu              157 arch/arm64/kvm/inject_fault.c 	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
vcpu              158 arch/arm64/kvm/inject_fault.c 	vcpu_write_spsr(vcpu, cpsr);
vcpu              164 arch/arm64/kvm/inject_fault.c 	if (kvm_vcpu_trap_il_is32bit(vcpu))
vcpu              167 arch/arm64/kvm/inject_fault.c 	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
vcpu              178 arch/arm64/kvm/inject_fault.c void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu              180 arch/arm64/kvm/inject_fault.c 	if (vcpu_el1_is_32bit(vcpu))
vcpu              181 arch/arm64/kvm/inject_fault.c 		kvm_inject_dabt32(vcpu, addr);
vcpu              183 arch/arm64/kvm/inject_fault.c 		inject_abt64(vcpu, false, addr);
vcpu              194 arch/arm64/kvm/inject_fault.c void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu              196 arch/arm64/kvm/inject_fault.c 	if (vcpu_el1_is_32bit(vcpu))
vcpu              197 arch/arm64/kvm/inject_fault.c 		kvm_inject_pabt32(vcpu, addr);
vcpu              199 arch/arm64/kvm/inject_fault.c 		inject_abt64(vcpu, true, addr);
vcpu              208 arch/arm64/kvm/inject_fault.c void kvm_inject_undefined(struct kvm_vcpu *vcpu)
vcpu              210 arch/arm64/kvm/inject_fault.c 	if (vcpu_el1_is_32bit(vcpu))
vcpu              211 arch/arm64/kvm/inject_fault.c 		kvm_inject_undef32(vcpu);
vcpu              213 arch/arm64/kvm/inject_fault.c 		inject_undef64(vcpu);
vcpu              216 arch/arm64/kvm/inject_fault.c void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
vcpu              218 arch/arm64/kvm/inject_fault.c 	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
vcpu              219 arch/arm64/kvm/inject_fault.c 	*vcpu_hcr(vcpu) |= HCR_VSE;
vcpu              234 arch/arm64/kvm/inject_fault.c void kvm_inject_vabt(struct kvm_vcpu *vcpu)
vcpu              236 arch/arm64/kvm/inject_fault.c 	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
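The inject_fault.c entries show that kvm_inject_dabt(), kvm_inject_pabt() and kvm_inject_undefined() all dispatch on vcpu_el1_is_32bit(): 32-bit EL1 guests get the AArch32 helpers, otherwise the AArch64 injectors build ELR/SPSR/ESR/FAR state directly. A minimal standalone model of that dispatch pattern follows; the demo_* names are invented for illustration and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct demo_vcpu { bool el1_is_32bit; };

static void demo_inject_dabt32(struct demo_vcpu *v, unsigned long addr)
{
	(void)v;
	printf("AArch32 data abort at %#lx\n", addr);
}

static void demo_inject_abt64(struct demo_vcpu *v, bool is_iabt, unsigned long addr)
{
	(void)v;
	printf("AArch64 %s abort at %#lx\n", is_iabt ? "instruction" : "data", addr);
}

/* Mirrors the dispatch shape of kvm_inject_dabt() indexed above. */
static void demo_inject_dabt(struct demo_vcpu *v, unsigned long addr)
{
	if (v->el1_is_32bit)
		demo_inject_dabt32(v, addr);
	else
		demo_inject_abt64(v, false, addr);
}

int main(void)
{
	struct demo_vcpu v32 = { .el1_is_32bit = true };
	struct demo_vcpu v64 = { .el1_is_32bit = false };

	demo_inject_dabt(&v32, 0x1000);
	demo_inject_dabt(&v64, 0x2000);
	return 0;
}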
vcpu              164 arch/arm64/kvm/pmu.c void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
vcpu              173 arch/arm64/kvm/pmu.c 	host_ctxt = vcpu->arch.host_cpu_context;
vcpu              185 arch/arm64/kvm/pmu.c void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
vcpu              194 arch/arm64/kvm/pmu.c 	host_ctxt = vcpu->arch.host_cpu_context;
vcpu              101 arch/arm64/kvm/regmap.c unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
vcpu              103 arch/arm64/kvm/regmap.c 	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
vcpu              104 arch/arm64/kvm/regmap.c 	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
vcpu              133 arch/arm64/kvm/regmap.c static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
vcpu              135 arch/arm64/kvm/regmap.c 	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
vcpu              146 arch/arm64/kvm/regmap.c unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
vcpu              148 arch/arm64/kvm/regmap.c 	int spsr_idx = vcpu_spsr32_mode(vcpu);
vcpu              150 arch/arm64/kvm/regmap.c 	if (!vcpu->arch.sysregs_loaded_on_cpu)
vcpu              151 arch/arm64/kvm/regmap.c 		return vcpu_gp_regs(vcpu)->spsr[spsr_idx];
vcpu              169 arch/arm64/kvm/regmap.c void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
vcpu              171 arch/arm64/kvm/regmap.c 	int spsr_idx = vcpu_spsr32_mode(vcpu);
vcpu              173 arch/arm64/kvm/regmap.c 	if (!vcpu->arch.sysregs_loaded_on_cpu) {
vcpu              174 arch/arm64/kvm/regmap.c 		vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
vcpu              133 arch/arm64/kvm/reset.c static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
vcpu              142 arch/arm64/kvm/reset.c 	vcpu->arch.sve_max_vl = kvm_sve_max_vl;
vcpu              149 arch/arm64/kvm/reset.c 	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
vcpu              158 arch/arm64/kvm/reset.c static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
vcpu              163 arch/arm64/kvm/reset.c 	vl = vcpu->arch.sve_max_vl;
vcpu              178 arch/arm64/kvm/reset.c 	vcpu->arch.sve_state = buf;
vcpu              179 arch/arm64/kvm/reset.c 	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
vcpu              183 arch/arm64/kvm/reset.c int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
vcpu              187 arch/arm64/kvm/reset.c 		if (!vcpu_has_sve(vcpu))
vcpu              190 arch/arm64/kvm/reset.c 		if (kvm_arm_vcpu_sve_finalized(vcpu))
vcpu              193 arch/arm64/kvm/reset.c 		return kvm_vcpu_finalize_sve(vcpu);
vcpu              199 arch/arm64/kvm/reset.c bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
vcpu              201 arch/arm64/kvm/reset.c 	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
vcpu              207 arch/arm64/kvm/reset.c void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              209 arch/arm64/kvm/reset.c 	kfree(vcpu->arch.sve_state);
vcpu              212 arch/arm64/kvm/reset.c static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
vcpu              214 arch/arm64/kvm/reset.c 	if (vcpu_has_sve(vcpu))
vcpu              215 arch/arm64/kvm/reset.c 		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
vcpu              218 arch/arm64/kvm/reset.c static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
vcpu              231 arch/arm64/kvm/reset.c 	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
vcpu              232 arch/arm64/kvm/reset.c 	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
vcpu              235 arch/arm64/kvm/reset.c 	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
vcpu              258 arch/arm64/kvm/reset.c int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu              265 arch/arm64/kvm/reset.c 	kvm_pmu_vcpu_reset(vcpu);
vcpu              268 arch/arm64/kvm/reset.c 	loaded = (vcpu->cpu != -1);
vcpu              270 arch/arm64/kvm/reset.c 		kvm_arch_vcpu_put(vcpu);
vcpu              272 arch/arm64/kvm/reset.c 	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
vcpu              273 arch/arm64/kvm/reset.c 		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
vcpu              274 arch/arm64/kvm/reset.c 			ret = kvm_vcpu_enable_sve(vcpu);
vcpu              279 arch/arm64/kvm/reset.c 		kvm_vcpu_reset_sve(vcpu);
vcpu              282 arch/arm64/kvm/reset.c 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
vcpu              283 arch/arm64/kvm/reset.c 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
vcpu              284 arch/arm64/kvm/reset.c 		if (kvm_vcpu_enable_ptrauth(vcpu))
vcpu              288 arch/arm64/kvm/reset.c 	switch (vcpu->arch.target) {
vcpu              290 arch/arm64/kvm/reset.c 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
vcpu              302 arch/arm64/kvm/reset.c 	memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
vcpu              305 arch/arm64/kvm/reset.c 	kvm_reset_sys_regs(vcpu);
vcpu              311 arch/arm64/kvm/reset.c 	if (vcpu->arch.reset_state.reset) {
vcpu              312 arch/arm64/kvm/reset.c 		unsigned long target_pc = vcpu->arch.reset_state.pc;
vcpu              315 arch/arm64/kvm/reset.c 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
vcpu              317 arch/arm64/kvm/reset.c 			vcpu_set_thumb(vcpu);
vcpu              321 arch/arm64/kvm/reset.c 		if (vcpu->arch.reset_state.be)
vcpu              322 arch/arm64/kvm/reset.c 			kvm_vcpu_set_be(vcpu);
vcpu              324 arch/arm64/kvm/reset.c 		*vcpu_pc(vcpu) = target_pc;
vcpu              325 arch/arm64/kvm/reset.c 		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
vcpu              327 arch/arm64/kvm/reset.c 		vcpu->arch.reset_state.reset = false;
vcpu              332 arch/arm64/kvm/reset.c 		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
vcpu              335 arch/arm64/kvm/reset.c 	ret = kvm_timer_vcpu_reset(vcpu);
vcpu              338 arch/arm64/kvm/reset.c 		kvm_arch_vcpu_load(vcpu, smp_processor_id());
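The reset.c entries show the SVE lifecycle: kvm_vcpu_enable_sve() records the maximum vector length and sets KVM_ARM64_GUEST_HAS_SVE, kvm_vcpu_finalize_sve() allocates sve_state exactly once, and kvm_arm_vcpu_is_finalized() refuses to treat an SVE vcpu as usable until finalization has happened. The sketch below models only that allocate-once/gate-on-finalized pattern; the sketch_* names and the buffer size are invented for illustration and are not kernel code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_vcpu {
	bool has_sve;
	bool sve_finalized;
	size_t sve_state_size;
	void *sve_state;
};

/* Mirrors the shape of kvm_vcpu_finalize_sve(): allocate backing storage
 * once the vector length is fixed, then mark the vcpu finalized. */
static int sketch_finalize_sve(struct sketch_vcpu *v)
{
	if (!v->has_sve)
		return -EINVAL;
	if (v->sve_finalized)
		return -EPERM;

	v->sve_state = calloc(1, v->sve_state_size);
	if (!v->sve_state)
		return -ENOMEM;

	v->sve_finalized = true;
	return 0;
}

/* Mirrors kvm_arm_vcpu_is_finalized(): an SVE vcpu only counts as
 * usable after finalization. */
static bool sketch_vcpu_is_finalized(const struct sketch_vcpu *v)
{
	return !v->has_sve || v->sve_finalized;
}

int main(void)
{
	struct sketch_vcpu v = { .has_sve = true, .sve_state_size = 4096 };

	printf("finalized before: %d\n", sketch_vcpu_is_finalized(&v));
	if (sketch_finalize_sve(&v) == 0)
		printf("finalized after:  %d\n", sketch_vcpu_is_finalized(&v));

	free(v.sve_state);
	return 0;
}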
vcpu               48 arch/arm64/kvm/sys_regs.c static bool read_from_write_only(struct kvm_vcpu *vcpu,
vcpu               54 arch/arm64/kvm/sys_regs.c 	kvm_inject_undefined(vcpu);
vcpu               58 arch/arm64/kvm/sys_regs.c static bool write_to_read_only(struct kvm_vcpu *vcpu,
vcpu               64 arch/arm64/kvm/sys_regs.c 	kvm_inject_undefined(vcpu);
vcpu               68 arch/arm64/kvm/sys_regs.c u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
vcpu               70 arch/arm64/kvm/sys_regs.c 	if (!vcpu->arch.sysregs_loaded_on_cpu)
vcpu              109 arch/arm64/kvm/sys_regs.c 	return __vcpu_sys_reg(vcpu, reg);
vcpu              112 arch/arm64/kvm/sys_regs.c void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
vcpu              114 arch/arm64/kvm/sys_regs.c 	if (!vcpu->arch.sysregs_loaded_on_cpu)
vcpu              152 arch/arm64/kvm/sys_regs.c 	 __vcpu_sys_reg(vcpu, reg) = val;
vcpu              179 arch/arm64/kvm/sys_regs.c static bool access_dcsw(struct kvm_vcpu *vcpu,
vcpu              184 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
vcpu              194 arch/arm64/kvm/sys_regs.c 		kvm_set_way_flush(vcpu);
vcpu              204 arch/arm64/kvm/sys_regs.c static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu              208 arch/arm64/kvm/sys_regs.c 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
vcpu              221 arch/arm64/kvm/sys_regs.c 		val = vcpu_read_sys_reg(vcpu, reg);
vcpu              228 arch/arm64/kvm/sys_regs.c 	vcpu_write_sys_reg(vcpu, val, reg);
vcpu              230 arch/arm64/kvm/sys_regs.c 	kvm_toggle_cache(vcpu, was_enabled);
vcpu              240 arch/arm64/kvm/sys_regs.c static bool access_gic_sgi(struct kvm_vcpu *vcpu,
vcpu              247 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
vcpu              280 arch/arm64/kvm/sys_regs.c 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
vcpu              285 arch/arm64/kvm/sys_regs.c static bool access_gic_sre(struct kvm_vcpu *vcpu,
vcpu              290 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
vcpu              292 arch/arm64/kvm/sys_regs.c 	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
vcpu              296 arch/arm64/kvm/sys_regs.c static bool trap_raz_wi(struct kvm_vcpu *vcpu,
vcpu              301 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
vcpu              303 arch/arm64/kvm/sys_regs.c 		return read_zero(vcpu, p);
vcpu              312 arch/arm64/kvm/sys_regs.c static bool trap_loregion(struct kvm_vcpu *vcpu,
vcpu              321 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu              326 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
vcpu              328 arch/arm64/kvm/sys_regs.c 	return trap_raz_wi(vcpu, p, r);
vcpu              331 arch/arm64/kvm/sys_regs.c static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
vcpu              336 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
vcpu              343 arch/arm64/kvm/sys_regs.c static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
vcpu              348 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
vcpu              382 arch/arm64/kvm/sys_regs.c static bool trap_debug_regs(struct kvm_vcpu *vcpu,
vcpu              387 arch/arm64/kvm/sys_regs.c 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
vcpu              388 arch/arm64/kvm/sys_regs.c 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu              390 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
vcpu              407 arch/arm64/kvm/sys_regs.c static void reg_to_dbg(struct kvm_vcpu *vcpu,
vcpu              419 arch/arm64/kvm/sys_regs.c 	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu              422 arch/arm64/kvm/sys_regs.c static void dbg_to_reg(struct kvm_vcpu *vcpu,
vcpu              431 arch/arm64/kvm/sys_regs.c static bool trap_bvr(struct kvm_vcpu *vcpu,
vcpu              435 arch/arm64/kvm/sys_regs.c 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
vcpu              438 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
vcpu              440 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
vcpu              447 arch/arm64/kvm/sys_regs.c static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              450 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
vcpu              457 arch/arm64/kvm/sys_regs.c static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              460 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
vcpu              467 arch/arm64/kvm/sys_regs.c static void reset_bvr(struct kvm_vcpu *vcpu,
vcpu              470 arch/arm64/kvm/sys_regs.c 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
vcpu              473 arch/arm64/kvm/sys_regs.c static bool trap_bcr(struct kvm_vcpu *vcpu,
vcpu              477 arch/arm64/kvm/sys_regs.c 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
vcpu              480 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
vcpu              482 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
vcpu              489 arch/arm64/kvm/sys_regs.c static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              492 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
vcpu              500 arch/arm64/kvm/sys_regs.c static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              503 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
vcpu              510 arch/arm64/kvm/sys_regs.c static void reset_bcr(struct kvm_vcpu *vcpu,
vcpu              513 arch/arm64/kvm/sys_regs.c 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
vcpu              516 arch/arm64/kvm/sys_regs.c static bool trap_wvr(struct kvm_vcpu *vcpu,
vcpu              520 arch/arm64/kvm/sys_regs.c 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
vcpu              523 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
vcpu              525 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
vcpu              528 arch/arm64/kvm/sys_regs.c 		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
vcpu              533 arch/arm64/kvm/sys_regs.c static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              536 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
vcpu              543 arch/arm64/kvm/sys_regs.c static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              546 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
vcpu              553 arch/arm64/kvm/sys_regs.c static void reset_wvr(struct kvm_vcpu *vcpu,
vcpu              556 arch/arm64/kvm/sys_regs.c 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
vcpu              559 arch/arm64/kvm/sys_regs.c static bool trap_wcr(struct kvm_vcpu *vcpu,
vcpu              563 arch/arm64/kvm/sys_regs.c 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
vcpu              566 arch/arm64/kvm/sys_regs.c 		reg_to_dbg(vcpu, p, dbg_reg);
vcpu              568 arch/arm64/kvm/sys_regs.c 		dbg_to_reg(vcpu, p, dbg_reg);
vcpu              575 arch/arm64/kvm/sys_regs.c static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              578 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
vcpu              585 arch/arm64/kvm/sys_regs.c static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu              588 arch/arm64/kvm/sys_regs.c 	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
vcpu              595 arch/arm64/kvm/sys_regs.c static void reset_wcr(struct kvm_vcpu *vcpu,
vcpu              598 arch/arm64/kvm/sys_regs.c 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
vcpu              601 arch/arm64/kvm/sys_regs.c static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu              604 arch/arm64/kvm/sys_regs.c 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
vcpu              607 arch/arm64/kvm/sys_regs.c static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu              618 arch/arm64/kvm/sys_regs.c 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
vcpu              619 arch/arm64/kvm/sys_regs.c 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
vcpu              620 arch/arm64/kvm/sys_regs.c 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
vcpu              621 arch/arm64/kvm/sys_regs.c 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
vcpu              624 arch/arm64/kvm/sys_regs.c static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu              637 arch/arm64/kvm/sys_regs.c 	__vcpu_sys_reg(vcpu, r->reg) = val;
vcpu              640 arch/arm64/kvm/sys_regs.c static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
vcpu              642 arch/arm64/kvm/sys_regs.c 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
vcpu              643 arch/arm64/kvm/sys_regs.c 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
vcpu              646 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu              651 arch/arm64/kvm/sys_regs.c static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
vcpu              653 arch/arm64/kvm/sys_regs.c 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
vcpu              656 arch/arm64/kvm/sys_regs.c static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
vcpu              658 arch/arm64/kvm/sys_regs.c 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
vcpu              661 arch/arm64/kvm/sys_regs.c static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
vcpu              663 arch/arm64/kvm/sys_regs.c 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
vcpu              666 arch/arm64/kvm/sys_regs.c static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
vcpu              668 arch/arm64/kvm/sys_regs.c 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
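The pmu_access_*_el0_disabled() helpers above all funnel into check_pmu_access_disabled(), which permits an access when the relevant PMUSERENR_EL0 enable bit is set or the guest is running privileged, and otherwise injects an undefined exception. The sketch below models only the permission check (not the UNDEF injection); the demo_* names and bit values are illustrative stand-ins, not the real ARMV8_PMU_USERENR_* encodings.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_USERENR_EN (1u << 0)
#define DEMO_USERENR_CR (1u << 2)

struct demo_vcpu {
	unsigned int pmuserenr;
	bool mode_priv; /* running at EL1 or higher */
};

/* Mirrors check_pmu_access_disabled(): access is allowed if any of the
 * requested enable bits is set, or if the guest is privileged. */
static bool demo_pmu_access_disabled(const struct demo_vcpu *v, unsigned int flags)
{
	bool enabled = (v->pmuserenr & flags) || v->mode_priv;

	return !enabled;
}

int main(void)
{
	struct demo_vcpu el0 = { .pmuserenr = DEMO_USERENR_EN, .mode_priv = false };
	struct demo_vcpu el1 = { .pmuserenr = 0, .mode_priv = true };

	printf("EL0 cycle counter blocked: %d\n",
	       demo_pmu_access_disabled(&el0, DEMO_USERENR_CR | DEMO_USERENR_EN));
	printf("EL1 cycle counter blocked: %d\n",
	       demo_pmu_access_disabled(&el1, DEMO_USERENR_CR | DEMO_USERENR_EN));
	return 0;
}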
vcpu              671 arch/arm64/kvm/sys_regs.c static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              676 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              677 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              679 arch/arm64/kvm/sys_regs.c 	if (pmu_access_el0_disabled(vcpu))
vcpu              684 arch/arm64/kvm/sys_regs.c 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
vcpu              689 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
vcpu              690 arch/arm64/kvm/sys_regs.c 		kvm_pmu_handle_pmcr(vcpu, val);
vcpu              691 arch/arm64/kvm/sys_regs.c 		kvm_vcpu_pmu_restore_guest(vcpu);
vcpu              694 arch/arm64/kvm/sys_regs.c 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
vcpu              702 arch/arm64/kvm/sys_regs.c static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              705 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              706 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              708 arch/arm64/kvm/sys_regs.c 	if (pmu_access_event_counter_el0_disabled(vcpu))
vcpu              712 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
vcpu              715 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
vcpu              721 arch/arm64/kvm/sys_regs.c static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              726 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              727 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              731 arch/arm64/kvm/sys_regs.c 	if (pmu_access_el0_disabled(vcpu))
vcpu              744 arch/arm64/kvm/sys_regs.c static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
vcpu              748 arch/arm64/kvm/sys_regs.c 	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
vcpu              751 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu              758 arch/arm64/kvm/sys_regs.c static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
vcpu              764 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              765 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              770 arch/arm64/kvm/sys_regs.c 			if (pmu_access_event_counter_el0_disabled(vcpu))
vcpu              773 arch/arm64/kvm/sys_regs.c 			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
vcpu              777 arch/arm64/kvm/sys_regs.c 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
vcpu              786 arch/arm64/kvm/sys_regs.c 		if (pmu_access_event_counter_el0_disabled(vcpu))
vcpu              792 arch/arm64/kvm/sys_regs.c 		if (pmu_access_event_counter_el0_disabled(vcpu))
vcpu              800 arch/arm64/kvm/sys_regs.c 	if (!pmu_counter_idx_valid(vcpu, idx))
vcpu              804 arch/arm64/kvm/sys_regs.c 		if (pmu_access_el0_disabled(vcpu))
vcpu              807 arch/arm64/kvm/sys_regs.c 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
vcpu              809 arch/arm64/kvm/sys_regs.c 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
vcpu              815 arch/arm64/kvm/sys_regs.c static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              820 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              821 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              823 arch/arm64/kvm/sys_regs.c 	if (pmu_access_el0_disabled(vcpu))
vcpu              828 arch/arm64/kvm/sys_regs.c 		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
vcpu              841 arch/arm64/kvm/sys_regs.c 	if (!pmu_counter_idx_valid(vcpu, idx))
vcpu              845 arch/arm64/kvm/sys_regs.c 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
vcpu              846 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
vcpu              847 arch/arm64/kvm/sys_regs.c 		kvm_vcpu_pmu_restore_guest(vcpu);
vcpu              849 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
vcpu              855 arch/arm64/kvm/sys_regs.c static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              860 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              861 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              863 arch/arm64/kvm/sys_regs.c 	if (pmu_access_el0_disabled(vcpu))
vcpu              866 arch/arm64/kvm/sys_regs.c 	mask = kvm_pmu_valid_counter_mask(vcpu);
vcpu              871 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
vcpu              872 arch/arm64/kvm/sys_regs.c 			kvm_pmu_enable_counter_mask(vcpu, val);
vcpu              873 arch/arm64/kvm/sys_regs.c 			kvm_vcpu_pmu_restore_guest(vcpu);
vcpu              876 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
vcpu              877 arch/arm64/kvm/sys_regs.c 			kvm_pmu_disable_counter_mask(vcpu, val);
vcpu              880 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
vcpu              886 arch/arm64/kvm/sys_regs.c static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              889 arch/arm64/kvm/sys_regs.c 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
vcpu              891 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              892 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              894 arch/arm64/kvm/sys_regs.c 	if (!vcpu_mode_priv(vcpu)) {
vcpu              895 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu              904 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
vcpu              907 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
vcpu              909 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
vcpu              915 arch/arm64/kvm/sys_regs.c static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              918 arch/arm64/kvm/sys_regs.c 	u64 mask = kvm_pmu_valid_counter_mask(vcpu);
vcpu              920 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              921 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              923 arch/arm64/kvm/sys_regs.c 	if (pmu_access_el0_disabled(vcpu))
vcpu              929 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
vcpu              932 arch/arm64/kvm/sys_regs.c 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
vcpu              934 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
vcpu              940 arch/arm64/kvm/sys_regs.c static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              945 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              946 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              949 arch/arm64/kvm/sys_regs.c 		return read_from_write_only(vcpu, p, r);
vcpu              951 arch/arm64/kvm/sys_regs.c 	if (pmu_write_swinc_el0_disabled(vcpu))
vcpu              954 arch/arm64/kvm/sys_regs.c 	mask = kvm_pmu_valid_counter_mask(vcpu);
vcpu              955 arch/arm64/kvm/sys_regs.c 	kvm_pmu_software_increment(vcpu, p->regval & mask);
vcpu              959 arch/arm64/kvm/sys_regs.c static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              962 arch/arm64/kvm/sys_regs.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              963 arch/arm64/kvm/sys_regs.c 		return trap_raz_wi(vcpu, p, r);
vcpu              966 arch/arm64/kvm/sys_regs.c 		if (!vcpu_mode_priv(vcpu)) {
vcpu              967 arch/arm64/kvm/sys_regs.c 			kvm_inject_undefined(vcpu);
vcpu              971 arch/arm64/kvm/sys_regs.c 		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
vcpu              974 arch/arm64/kvm/sys_regs.c 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
vcpu             1006 arch/arm64/kvm/sys_regs.c static bool trap_ptrauth(struct kvm_vcpu *vcpu,
vcpu             1010 arch/arm64/kvm/sys_regs.c 	kvm_arm_vcpu_ptrauth_trap(vcpu);
vcpu             1023 arch/arm64/kvm/sys_regs.c static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
vcpu             1026 arch/arm64/kvm/sys_regs.c 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
vcpu             1037 arch/arm64/kvm/sys_regs.c static bool access_arch_timer(struct kvm_vcpu *vcpu,
vcpu             1066 arch/arm64/kvm/sys_regs.c 		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
vcpu             1068 arch/arm64/kvm/sys_regs.c 		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
vcpu             1074 arch/arm64/kvm/sys_regs.c static u64 read_id_reg(const struct kvm_vcpu *vcpu,
vcpu             1081 arch/arm64/kvm/sys_regs.c 	if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
vcpu             1083 arch/arm64/kvm/sys_regs.c 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
vcpu             1095 arch/arm64/kvm/sys_regs.c static bool __access_id_reg(struct kvm_vcpu *vcpu,
vcpu             1101 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
vcpu             1103 arch/arm64/kvm/sys_regs.c 	p->regval = read_id_reg(vcpu, r, raz);
vcpu             1107 arch/arm64/kvm/sys_regs.c static bool access_id_reg(struct kvm_vcpu *vcpu,
vcpu             1111 arch/arm64/kvm/sys_regs.c 	return __access_id_reg(vcpu, p, r, false);
vcpu             1114 arch/arm64/kvm/sys_regs.c static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
vcpu             1118 arch/arm64/kvm/sys_regs.c 	return __access_id_reg(vcpu, p, r, true);
vcpu             1126 arch/arm64/kvm/sys_regs.c static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
vcpu             1129 arch/arm64/kvm/sys_regs.c 	if (vcpu_has_sve(vcpu))
vcpu             1136 arch/arm64/kvm/sys_regs.c static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
vcpu             1139 arch/arm64/kvm/sys_regs.c 	if (vcpu_has_sve(vcpu))
vcpu             1146 arch/arm64/kvm/sys_regs.c static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
vcpu             1148 arch/arm64/kvm/sys_regs.c 	if (!vcpu_has_sve(vcpu))
vcpu             1154 arch/arm64/kvm/sys_regs.c static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
vcpu             1159 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, rd);
vcpu             1161 arch/arm64/kvm/sys_regs.c 	p->regval = guest_id_aa64zfr0_el1(vcpu);
vcpu             1165 arch/arm64/kvm/sys_regs.c static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
vcpu             1171 arch/arm64/kvm/sys_regs.c 	if (WARN_ON(!vcpu_has_sve(vcpu)))
vcpu             1174 arch/arm64/kvm/sys_regs.c 	val = guest_id_aa64zfr0_el1(vcpu);
vcpu             1178 arch/arm64/kvm/sys_regs.c static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
vcpu             1186 arch/arm64/kvm/sys_regs.c 	if (WARN_ON(!vcpu_has_sve(vcpu)))
vcpu             1194 arch/arm64/kvm/sys_regs.c 	if (val != guest_id_aa64zfr0_el1(vcpu))
vcpu             1207 arch/arm64/kvm/sys_regs.c static int __get_id_reg(const struct kvm_vcpu *vcpu,
vcpu             1212 arch/arm64/kvm/sys_regs.c 	const u64 val = read_id_reg(vcpu, rd, raz);
vcpu             1217 arch/arm64/kvm/sys_regs.c static int __set_id_reg(const struct kvm_vcpu *vcpu,
vcpu             1230 arch/arm64/kvm/sys_regs.c 	if (val != read_id_reg(vcpu, rd, raz))
vcpu             1236 arch/arm64/kvm/sys_regs.c static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu             1239 arch/arm64/kvm/sys_regs.c 	return __get_id_reg(vcpu, rd, uaddr, false);
vcpu             1242 arch/arm64/kvm/sys_regs.c static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu             1245 arch/arm64/kvm/sys_regs.c 	return __set_id_reg(vcpu, rd, uaddr, false);
vcpu             1248 arch/arm64/kvm/sys_regs.c static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu             1251 arch/arm64/kvm/sys_regs.c 	return __get_id_reg(vcpu, rd, uaddr, true);
vcpu             1254 arch/arm64/kvm/sys_regs.c static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu             1257 arch/arm64/kvm/sys_regs.c 	return __set_id_reg(vcpu, rd, uaddr, true);
vcpu             1260 arch/arm64/kvm/sys_regs.c static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu             1264 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
vcpu             1270 arch/arm64/kvm/sys_regs.c static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu             1274 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
vcpu             1280 arch/arm64/kvm/sys_regs.c static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu             1290 arch/arm64/kvm/sys_regs.c 		vcpu_write_sys_reg(vcpu, p->regval, reg);
vcpu             1292 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_read_sys_reg(vcpu, reg);
vcpu             1296 arch/arm64/kvm/sys_regs.c static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu             1302 arch/arm64/kvm/sys_regs.c 		return write_to_read_only(vcpu, p, r);
vcpu             1304 arch/arm64/kvm/sys_regs.c 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
vcpu             1653 arch/arm64/kvm/sys_regs.c static bool trap_dbgidr(struct kvm_vcpu *vcpu,
vcpu             1658 arch/arm64/kvm/sys_regs.c 		return ignore_write(vcpu, p);
vcpu             1672 arch/arm64/kvm/sys_regs.c static bool trap_debug32(struct kvm_vcpu *vcpu,
vcpu             1677 arch/arm64/kvm/sys_regs.c 		vcpu_cp14(vcpu, r->reg) = p->regval;
vcpu             1678 arch/arm64/kvm/sys_regs.c 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu             1680 arch/arm64/kvm/sys_regs.c 		p->regval = vcpu_cp14(vcpu, r->reg);
vcpu             1697 arch/arm64/kvm/sys_regs.c static bool trap_xvr(struct kvm_vcpu *vcpu,
vcpu             1701 arch/arm64/kvm/sys_regs.c 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
vcpu             1710 arch/arm64/kvm/sys_regs.c 		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
vcpu             2027 arch/arm64/kvm/sys_regs.c int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2029 arch/arm64/kvm/sys_regs.c 	kvm_inject_undefined(vcpu);
vcpu             2033 arch/arm64/kvm/sys_regs.c static void perform_access(struct kvm_vcpu *vcpu,
vcpu             2037 arch/arm64/kvm/sys_regs.c 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
vcpu             2040 arch/arm64/kvm/sys_regs.c 	if (sysreg_hidden_from_guest(vcpu, r)) {
vcpu             2041 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu             2053 arch/arm64/kvm/sys_regs.c 	if (likely(r->access(vcpu, params, r)))
vcpu             2054 arch/arm64/kvm/sys_regs.c 		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu             2067 arch/arm64/kvm/sys_regs.c static int emulate_cp(struct kvm_vcpu *vcpu,
vcpu             2080 arch/arm64/kvm/sys_regs.c 		perform_access(vcpu, params, r);
vcpu             2088 arch/arm64/kvm/sys_regs.c static void unhandled_cp_access(struct kvm_vcpu *vcpu,
vcpu             2091 arch/arm64/kvm/sys_regs.c 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
vcpu             2108 arch/arm64/kvm/sys_regs.c 		cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
vcpu             2110 arch/arm64/kvm/sys_regs.c 	kvm_inject_undefined(vcpu);
vcpu             2118 arch/arm64/kvm/sys_regs.c static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
vcpu             2125 arch/arm64/kvm/sys_regs.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu             2126 arch/arm64/kvm/sys_regs.c 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
vcpu             2144 arch/arm64/kvm/sys_regs.c 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
vcpu             2145 arch/arm64/kvm/sys_regs.c 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
vcpu             2155 arch/arm64/kvm/sys_regs.c 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
vcpu             2156 arch/arm64/kvm/sys_regs.c 	    !emulate_cp(vcpu, &params, global, nr_global)) {
vcpu             2159 arch/arm64/kvm/sys_regs.c 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
vcpu             2160 arch/arm64/kvm/sys_regs.c 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
vcpu             2166 arch/arm64/kvm/sys_regs.c 	unhandled_cp_access(vcpu, &params);
vcpu             2175 arch/arm64/kvm/sys_regs.c static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
vcpu             2182 arch/arm64/kvm/sys_regs.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
vcpu             2183 arch/arm64/kvm/sys_regs.c 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
vcpu             2188 arch/arm64/kvm/sys_regs.c 	params.regval = vcpu_get_reg(vcpu, Rt);
vcpu             2195 arch/arm64/kvm/sys_regs.c 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
vcpu             2196 arch/arm64/kvm/sys_regs.c 	    !emulate_cp(vcpu, &params, global, nr_global)) {
vcpu             2198 arch/arm64/kvm/sys_regs.c 			vcpu_set_reg(vcpu, Rt, params.regval);
vcpu             2202 arch/arm64/kvm/sys_regs.c 	unhandled_cp_access(vcpu, &params);
vcpu             2206 arch/arm64/kvm/sys_regs.c int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2211 arch/arm64/kvm/sys_regs.c 	target_specific = get_target_table(vcpu->arch.target, false, &num);
vcpu             2212 arch/arm64/kvm/sys_regs.c 	return kvm_handle_cp_64(vcpu,
vcpu             2217 arch/arm64/kvm/sys_regs.c int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2222 arch/arm64/kvm/sys_regs.c 	target_specific = get_target_table(vcpu->arch.target, false, &num);
vcpu             2223 arch/arm64/kvm/sys_regs.c 	return kvm_handle_cp_32(vcpu,
vcpu             2228 arch/arm64/kvm/sys_regs.c int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2230 arch/arm64/kvm/sys_regs.c 	return kvm_handle_cp_64(vcpu,
vcpu             2235 arch/arm64/kvm/sys_regs.c int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2237 arch/arm64/kvm/sys_regs.c 	return kvm_handle_cp_32(vcpu,
vcpu             2242 arch/arm64/kvm/sys_regs.c static int emulate_sys_reg(struct kvm_vcpu *vcpu,
vcpu             2248 arch/arm64/kvm/sys_regs.c 	table = get_target_table(vcpu->arch.target, true, &num);
vcpu             2256 arch/arm64/kvm/sys_regs.c 		perform_access(vcpu, params, r);
vcpu             2259 arch/arm64/kvm/sys_regs.c 			*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
vcpu             2261 arch/arm64/kvm/sys_regs.c 		kvm_inject_undefined(vcpu);
vcpu             2266 arch/arm64/kvm/sys_regs.c static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
vcpu             2276 arch/arm64/kvm/sys_regs.c 			table[i].reset(vcpu, &table[i]);
vcpu             2287 arch/arm64/kvm/sys_regs.c int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             2290 arch/arm64/kvm/sys_regs.c 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
vcpu             2291 arch/arm64/kvm/sys_regs.c 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
vcpu             2303 arch/arm64/kvm/sys_regs.c 	params.regval = vcpu_get_reg(vcpu, Rt);
vcpu             2306 arch/arm64/kvm/sys_regs.c 	ret = emulate_sys_reg(vcpu, &params);
vcpu             2309 arch/arm64/kvm/sys_regs.c 		vcpu_set_reg(vcpu, Rt, params.regval);
vcpu             2358 arch/arm64/kvm/sys_regs.c static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
vcpu             2372 arch/arm64/kvm/sys_regs.c 	table = get_target_table(vcpu->arch.target, true, &num);
vcpu             2550 arch/arm64/kvm/sys_regs.c int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu             2561 arch/arm64/kvm/sys_regs.c 	r = index_to_sys_reg_desc(vcpu, reg->id);
vcpu             2566 arch/arm64/kvm/sys_regs.c 	if (sysreg_hidden_from_user(vcpu, r))
vcpu             2570 arch/arm64/kvm/sys_regs.c 		return (r->get_user)(vcpu, r, reg, uaddr);
vcpu             2572 arch/arm64/kvm/sys_regs.c 	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
vcpu             2575 arch/arm64/kvm/sys_regs.c int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu             2586 arch/arm64/kvm/sys_regs.c 	r = index_to_sys_reg_desc(vcpu, reg->id);
vcpu             2591 arch/arm64/kvm/sys_regs.c 	if (sysreg_hidden_from_user(vcpu, r))
vcpu             2595 arch/arm64/kvm/sys_regs.c 		return (r->set_user)(vcpu, r, reg, uaddr);
vcpu             2597 arch/arm64/kvm/sys_regs.c 	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
vcpu             2650 arch/arm64/kvm/sys_regs.c static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
vcpu             2662 arch/arm64/kvm/sys_regs.c 	if (sysreg_hidden_from_user(vcpu, rd))
vcpu             2673 arch/arm64/kvm/sys_regs.c static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
vcpu             2681 arch/arm64/kvm/sys_regs.c 	i1 = get_target_table(vcpu->arch.target, true, &num);
vcpu             2693 arch/arm64/kvm/sys_regs.c 			err = walk_one_sys_reg(vcpu, i1, &uind, &total);
vcpu             2695 arch/arm64/kvm/sys_regs.c 			err = walk_one_sys_reg(vcpu, i2, &uind, &total);
vcpu             2708 arch/arm64/kvm/sys_regs.c unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
vcpu             2712 arch/arm64/kvm/sys_regs.c 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
vcpu             2715 arch/arm64/kvm/sys_regs.c int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu             2727 arch/arm64/kvm/sys_regs.c 	err = walk_sys_regs(vcpu, uindices);
vcpu             2792 arch/arm64/kvm/sys_regs.c void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
vcpu             2799 arch/arm64/kvm/sys_regs.c 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
vcpu             2801 arch/arm64/kvm/sys_regs.c 	table = get_target_table(vcpu->arch.target, true, &num);
vcpu             2802 arch/arm64/kvm/sys_regs.c 	reset_sys_reg_descs(vcpu, table, num, bmap);
vcpu               52 arch/arm64/kvm/sys_regs.h 	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu               54 arch/arm64/kvm/sys_regs.h 	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
vcpu               58 arch/arm64/kvm/sys_regs.h 	unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
vcpu               72 arch/arm64/kvm/sys_regs.h static inline bool ignore_write(struct kvm_vcpu *vcpu,
vcpu               78 arch/arm64/kvm/sys_regs.h static inline bool read_zero(struct kvm_vcpu *vcpu,
vcpu               86 arch/arm64/kvm/sys_regs.h static inline void reset_unknown(struct kvm_vcpu *vcpu,
vcpu               91 arch/arm64/kvm/sys_regs.h 	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
vcpu               94 arch/arm64/kvm/sys_regs.h static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu               98 arch/arm64/kvm/sys_regs.h 	__vcpu_sys_reg(vcpu, r->reg) = r->val;
vcpu              101 arch/arm64/kvm/sys_regs.h static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
vcpu              107 arch/arm64/kvm/sys_regs.h 	return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
vcpu              110 arch/arm64/kvm/sys_regs.h static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
vcpu              116 arch/arm64/kvm/sys_regs.h 	return r->visibility(vcpu, r) & REG_HIDDEN_USER;
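The sys_regs.h entries define the per-register visibility() hook consumed by sysreg_hidden_from_guest() and sysreg_hidden_from_user(), and the sys_regs.c entries wire it up for SVE via sve_visibility()/sve_id_visibility() and for pointer auth via ptrauth_visibility(). The standalone sketch below illustrates that callback pattern; the demo_* types, flag values and the ZCR_EL1 register name are used only as an example and are not taken from the indexed source.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_REG_HIDDEN_USER  (1u << 0)
#define DEMO_REG_HIDDEN_GUEST (1u << 1)

struct demo_vcpu { bool has_sve; };

struct demo_reg_desc {
	const char *name;
	unsigned int (*visibility)(const struct demo_vcpu *v,
				   const struct demo_reg_desc *r);
};

/* Mirrors the shape of sve_visibility(): hide the register from both the
 * guest and userspace unless the vcpu actually has the feature. */
static unsigned int demo_sve_visibility(const struct demo_vcpu *v,
					const struct demo_reg_desc *r)
{
	(void)r;
	return v->has_sve ? 0 : DEMO_REG_HIDDEN_USER | DEMO_REG_HIDDEN_GUEST;
}

/* Mirrors sysreg_hidden_from_guest(): no callback means always visible. */
static bool demo_hidden_from_guest(const struct demo_vcpu *v,
				   const struct demo_reg_desc *r)
{
	if (!r->visibility)
		return false;
	return r->visibility(v, r) & DEMO_REG_HIDDEN_GUEST;
}

int main(void)
{
	struct demo_reg_desc zcr = { .name = "ZCR_EL1",
				     .visibility = demo_sve_visibility };
	struct demo_vcpu plain = { .has_sve = false };
	struct demo_vcpu sve   = { .has_sve = true };

	printf("%s hidden (no SVE):   %d\n", zcr.name, demo_hidden_from_guest(&plain, &zcr));
	printf("%s hidden (with SVE): %d\n", zcr.name, demo_hidden_from_guest(&sve, &zcr));
	return 0;
}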
vcpu               23 arch/arm64/kvm/sys_regs_generic_v8.c static bool access_actlr(struct kvm_vcpu *vcpu,
vcpu               28 arch/arm64/kvm/sys_regs_generic_v8.c 		return ignore_write(vcpu, p);
vcpu               30 arch/arm64/kvm/sys_regs_generic_v8.c 	p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
vcpu               34 arch/arm64/kvm/sys_regs_generic_v8.c static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu               36 arch/arm64/kvm/sys_regs_generic_v8.c 	__vcpu_sys_reg(vcpu, ACTLR_EL1) = read_sysreg(actlr_el1);
vcpu               50 arch/arm64/kvm/trace.h 	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
vcpu               51 arch/arm64/kvm/trace.h 	TP_ARGS(vcpu, guest_debug),
vcpu               54 arch/arm64/kvm/trace.h 		__field(struct kvm_vcpu *, vcpu)
vcpu               59 arch/arm64/kvm/trace.h 		__entry->vcpu = vcpu;
vcpu               63 arch/arm64/kvm/trace.h 	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
vcpu              191 arch/arm64/kvm/trace.h 	TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
vcpu              192 arch/arm64/kvm/trace.h 	TP_ARGS(vcpu, guest_debug),
vcpu              195 arch/arm64/kvm/trace.h 		__field(struct kvm_vcpu *, vcpu)
vcpu              200 arch/arm64/kvm/trace.h 		__entry->vcpu = vcpu;
vcpu              204 arch/arm64/kvm/trace.h 	TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
vcpu               13 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu               17 arch/arm64/kvm/vgic-sys-reg-v3.c 	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
vcpu               21 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu               62 arch/arm64/kvm/vgic-sys-reg-v3.c 		vgic_set_vmcr(vcpu, &vmcr);
vcpu               87 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu               92 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu               95 arch/arm64/kvm/vgic-sys-reg-v3.c 		vgic_set_vmcr(vcpu, &vmcr);
vcpu              103 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              108 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              112 arch/arm64/kvm/vgic-sys-reg-v3.c 		vgic_set_vmcr(vcpu, &vmcr);
vcpu              121 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              129 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              134 arch/arm64/kvm/vgic-sys-reg-v3.c 			vgic_set_vmcr(vcpu, &vmcr);
vcpu              147 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              152 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              156 arch/arm64/kvm/vgic-sys-reg-v3.c 		vgic_set_vmcr(vcpu, &vmcr);
vcpu              165 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              170 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              174 arch/arm64/kvm/vgic-sys-reg-v3.c 		vgic_set_vmcr(vcpu, &vmcr);
vcpu              183 arch/arm64/kvm/vgic-sys-reg-v3.c static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
vcpu              186 arch/arm64/kvm/vgic-sys-reg-v3.c 	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              200 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              205 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (idx > vgic_v3_max_apr_idx(vcpu))
vcpu              208 arch/arm64/kvm/vgic-sys-reg-v3.c 	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
vcpu              217 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              221 arch/arm64/kvm/vgic-sys-reg-v3.c 	return access_gic_aprn(vcpu, p, r, 0);
vcpu              224 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              227 arch/arm64/kvm/vgic-sys-reg-v3.c 	return access_gic_aprn(vcpu, p, r, 1);
vcpu              230 arch/arm64/kvm/vgic-sys-reg-v3.c static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
vcpu              233 arch/arm64/kvm/vgic-sys-reg-v3.c 	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              263 arch/arm64/kvm/vgic-sys-reg-v3.c int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
vcpu              281 arch/arm64/kvm/vgic-sys-reg-v3.c int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
vcpu              299 arch/arm64/kvm/vgic-sys-reg-v3.c 	if (!r->access(vcpu, &params, r))
vcpu              104 arch/mips/include/asm/kvm_host.h #define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
vcpu              105 arch/mips/include/asm/kvm_host.h 					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
vcpu              317 arch/mips/include/asm/kvm_host.h 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              747 arch/mips/include/asm/kvm_host.h static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
vcpu              750 arch/mips/include/asm/kvm_host.h 		vcpu->fpu_enabled;
vcpu              753 arch/mips/include/asm/kvm_host.h static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
vcpu              755 arch/mips/include/asm/kvm_host.h 	return kvm_mips_guest_can_have_fpu(vcpu) &&
vcpu              756 arch/mips/include/asm/kvm_host.h 		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
vcpu              759 arch/mips/include/asm/kvm_host.h static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
vcpu              762 arch/mips/include/asm/kvm_host.h 		vcpu->msa_enabled;
vcpu              765 arch/mips/include/asm/kvm_host.h static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
vcpu              767 arch/mips/include/asm/kvm_host.h 	return kvm_mips_guest_can_have_msa(vcpu) &&
vcpu              768 arch/mips/include/asm/kvm_host.h 		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
vcpu              772 arch/mips/include/asm/kvm_host.h 	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
vcpu              773 arch/mips/include/asm/kvm_host.h 	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
vcpu              774 arch/mips/include/asm/kvm_host.h 	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
vcpu              775 arch/mips/include/asm/kvm_host.h 	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
vcpu              776 arch/mips/include/asm/kvm_host.h 	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
vcpu              777 arch/mips/include/asm/kvm_host.h 	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
vcpu              778 arch/mips/include/asm/kvm_host.h 	int (*handle_syscall)(struct kvm_vcpu *vcpu);
vcpu              779 arch/mips/include/asm/kvm_host.h 	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
vcpu              780 arch/mips/include/asm/kvm_host.h 	int (*handle_break)(struct kvm_vcpu *vcpu);
vcpu              781 arch/mips/include/asm/kvm_host.h 	int (*handle_trap)(struct kvm_vcpu *vcpu);
vcpu              782 arch/mips/include/asm/kvm_host.h 	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
vcpu              783 arch/mips/include/asm/kvm_host.h 	int (*handle_fpe)(struct kvm_vcpu *vcpu);
vcpu              784 arch/mips/include/asm/kvm_host.h 	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
vcpu              785 arch/mips/include/asm/kvm_host.h 	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
vcpu              789 arch/mips/include/asm/kvm_host.h 	int (*vcpu_init)(struct kvm_vcpu *vcpu);
vcpu              790 arch/mips/include/asm/kvm_host.h 	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
vcpu              791 arch/mips/include/asm/kvm_host.h 	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
vcpu              801 arch/mips/include/asm/kvm_host.h 	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
vcpu              802 arch/mips/include/asm/kvm_host.h 	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
vcpu              803 arch/mips/include/asm/kvm_host.h 	void (*queue_io_int)(struct kvm_vcpu *vcpu,
vcpu              805 arch/mips/include/asm/kvm_host.h 	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
vcpu              807 arch/mips/include/asm/kvm_host.h 	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              809 arch/mips/include/asm/kvm_host.h 	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              811 arch/mips/include/asm/kvm_host.h 	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
vcpu              812 arch/mips/include/asm/kvm_host.h 	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
vcpu              813 arch/mips/include/asm/kvm_host.h 	int (*get_one_reg)(struct kvm_vcpu *vcpu,
vcpu              815 arch/mips/include/asm/kvm_host.h 	int (*set_one_reg)(struct kvm_vcpu *vcpu,
vcpu              817 arch/mips/include/asm/kvm_host.h 	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
vcpu              818 arch/mips/include/asm/kvm_host.h 	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
vcpu              819 arch/mips/include/asm/kvm_host.h 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              820 arch/mips/include/asm/kvm_host.h 	void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              826 arch/mips/include/asm/kvm_host.h int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
vcpu              828 arch/mips/include/asm/kvm_host.h extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              838 arch/mips/include/asm/kvm_host.h void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
vcpu              839 arch/mips/include/asm/kvm_host.h void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
vcpu              840 arch/mips/include/asm/kvm_host.h void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
vcpu              841 arch/mips/include/asm/kvm_host.h void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
vcpu              842 arch/mips/include/asm/kvm_host.h void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
vcpu              843 arch/mips/include/asm/kvm_host.h void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
vcpu              844 arch/mips/include/asm/kvm_host.h void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
vcpu              845 arch/mips/include/asm/kvm_host.h void kvm_own_fpu(struct kvm_vcpu *vcpu);
vcpu              846 arch/mips/include/asm/kvm_host.h void kvm_own_msa(struct kvm_vcpu *vcpu);
vcpu              847 arch/mips/include/asm/kvm_host.h void kvm_drop_fpu(struct kvm_vcpu *vcpu);
vcpu              848 arch/mips/include/asm/kvm_host.h void kvm_lose_fpu(struct kvm_vcpu *vcpu);
vcpu              851 arch/mips/include/asm/kvm_host.h u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
vcpu              853 arch/mips/include/asm/kvm_host.h u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
vcpu              855 arch/mips/include/asm/kvm_host.h u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);
vcpu              859 arch/mips/include/asm/kvm_host.h 				      struct kvm_vcpu *vcpu, bool write_fault);
vcpu              862 arch/mips/include/asm/kvm_host.h 					   struct kvm_vcpu *vcpu,
vcpu              866 arch/mips/include/asm/kvm_host.h 					      struct kvm_vcpu *vcpu);
vcpu              868 arch/mips/include/asm/kvm_host.h extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
vcpu              876 arch/mips/include/asm/kvm_host.h 						     struct kvm_vcpu *vcpu,
vcpu              880 arch/mips/include/asm/kvm_host.h extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
vcpu              881 arch/mips/include/asm/kvm_host.h extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
vcpu              884 arch/mips/include/asm/kvm_host.h extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
vcpu              888 arch/mips/include/asm/kvm_host.h int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
vcpu              889 arch/mips/include/asm/kvm_host.h int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu              922 arch/mips/include/asm/kvm_host.h void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
vcpu              923 arch/mips/include/asm/kvm_host.h void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu              925 arch/mips/include/asm/kvm_host.h void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
vcpu              926 arch/mips/include/asm/kvm_host.h void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
vcpu              936 arch/mips/include/asm/kvm_host.h enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
vcpu              948 arch/mips/include/asm/kvm_host.h int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
vcpu              949 arch/mips/include/asm/kvm_host.h enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
vcpu              950 arch/mips/include/asm/kvm_host.h int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
vcpu              951 arch/mips/include/asm/kvm_host.h int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
vcpu              960 arch/mips/include/asm/kvm_host.h static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
vcpu              962 arch/mips/include/asm/kvm_host.h 	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
vcpu              963 arch/mips/include/asm/kvm_host.h 	unsigned long epc = msk_isa16_mode(vcpu->pc);
vcpu              964 arch/mips/include/asm/kvm_host.h 	u32 cause = vcpu->host_cp0_cause;
vcpu              983 arch/mips/include/asm/kvm_host.h 						   struct kvm_vcpu *vcpu);
vcpu              985 arch/mips/include/asm/kvm_host.h long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
vcpu              990 arch/mips/include/asm/kvm_host.h 						      struct kvm_vcpu *vcpu);
vcpu              995 arch/mips/include/asm/kvm_host.h 							 struct kvm_vcpu *vcpu);
vcpu             1000 arch/mips/include/asm/kvm_host.h 							struct kvm_vcpu *vcpu);
vcpu             1005 arch/mips/include/asm/kvm_host.h 							 struct kvm_vcpu *vcpu);
vcpu             1010 arch/mips/include/asm/kvm_host.h 							struct kvm_vcpu *vcpu);
vcpu             1015 arch/mips/include/asm/kvm_host.h 						     struct kvm_vcpu *vcpu);
vcpu             1020 arch/mips/include/asm/kvm_host.h 						      struct kvm_vcpu *vcpu);
vcpu             1025 arch/mips/include/asm/kvm_host.h 						struct kvm_vcpu *vcpu);
vcpu             1030 arch/mips/include/asm/kvm_host.h 						     struct kvm_vcpu *vcpu);
vcpu             1035 arch/mips/include/asm/kvm_host.h 						     struct kvm_vcpu *vcpu);
vcpu             1040 arch/mips/include/asm/kvm_host.h 						       struct kvm_vcpu *vcpu);
vcpu             1045 arch/mips/include/asm/kvm_host.h 							 struct kvm_vcpu *vcpu);
vcpu             1050 arch/mips/include/asm/kvm_host.h 						      struct kvm_vcpu *vcpu);
vcpu             1055 arch/mips/include/asm/kvm_host.h 							 struct kvm_vcpu *vcpu);
vcpu             1057 arch/mips/include/asm/kvm_host.h extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu             1060 arch/mips/include/asm/kvm_host.h u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
vcpu             1061 arch/mips/include/asm/kvm_host.h void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
vcpu             1062 arch/mips/include/asm/kvm_host.h void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
vcpu             1063 arch/mips/include/asm/kvm_host.h void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
vcpu             1064 arch/mips/include/asm/kvm_host.h int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
vcpu             1065 arch/mips/include/asm/kvm_host.h int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
vcpu             1066 arch/mips/include/asm/kvm_host.h int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
vcpu             1067 arch/mips/include/asm/kvm_host.h void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
vcpu             1068 arch/mips/include/asm/kvm_host.h void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
vcpu             1069 arch/mips/include/asm/kvm_host.h enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
vcpu             1072 arch/mips/include/asm/kvm_host.h int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
vcpu             1073 arch/mips/include/asm/kvm_host.h ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
vcpu             1074 arch/mips/include/asm/kvm_host.h int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
vcpu             1078 arch/mips/include/asm/kvm_host.h void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
vcpu             1079 arch/mips/include/asm/kvm_host.h void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
vcpu             1081 arch/mips/include/asm/kvm_host.h static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
vcpu             1082 arch/mips/include/asm/kvm_host.h static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
vcpu             1088 arch/mips/include/asm/kvm_host.h 					       struct kvm_vcpu *vcpu);
vcpu             1094 arch/mips/include/asm/kvm_host.h 					     struct kvm_vcpu *vcpu);
vcpu             1099 arch/mips/include/asm/kvm_host.h 					   struct kvm_vcpu *vcpu);
vcpu             1103 arch/mips/include/asm/kvm_host.h 					     struct kvm_vcpu *vcpu);
vcpu             1107 arch/mips/include/asm/kvm_host.h 					    struct kvm_vcpu *vcpu);
vcpu             1110 arch/mips/include/asm/kvm_host.h enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
vcpu             1112 arch/mips/include/asm/kvm_host.h unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
vcpu             1113 arch/mips/include/asm/kvm_host.h unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
vcpu             1114 arch/mips/include/asm/kvm_host.h unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
vcpu             1115 arch/mips/include/asm/kvm_host.h unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
vcpu             1119 arch/mips/include/asm/kvm_host.h enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
vcpu             1121 arch/mips/include/asm/kvm_host.h int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
vcpu             1125 arch/mips/include/asm/kvm_host.h 				      u32 *opc, struct kvm_vcpu *vcpu);
vcpu             1127 arch/mips/include/asm/kvm_host.h 				   struct kvm_vcpu *vcpu);
vcpu             1129 arch/mips/include/asm/kvm_host.h 			       struct kvm_vcpu *vcpu);
vcpu             1131 arch/mips/include/asm/kvm_host.h 			       struct kvm_vcpu *vcpu);
vcpu             1134 arch/mips/include/asm/kvm_host.h extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
vcpu             1142 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
vcpu             1143 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
vcpu             1144 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
vcpu             1145 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
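
Among the kvm_host.h entries above are the members of the per-implementation callback table (dereferenced elsewhere in this listing as kvm_mips_callbacks->...): exit handlers, vcpu lifecycle hooks, interrupt queueing, and the one-reg accessors. The generic MIPS KVM code dispatches through these function pointers so that the trap-and-emulate and VZ backends can each supply their own handlers. A minimal, self-contained sketch of that dispatch pattern, using hypothetical names rather than the kernel structure, is:

/* Hypothetical standalone analogue of an ops table like the one listed
 * above: the generic layer calls through function pointers and each
 * backend fills in its own handlers.  Names here are illustrative only. */
#include <stdio.h>

struct demo_vcpu { int pending_timer; };

struct demo_callbacks {
    void (*queue_timer_int)(struct demo_vcpu *vcpu);
    void (*dequeue_timer_int)(struct demo_vcpu *vcpu);
};

static void demo_queue_timer(struct demo_vcpu *vcpu)   { vcpu->pending_timer = 1; }
static void demo_dequeue_timer(struct demo_vcpu *vcpu) { vcpu->pending_timer = 0; }

/* One backend's instance of the table. */
static const struct demo_callbacks demo_ops = {
    .queue_timer_int   = demo_queue_timer,
    .dequeue_timer_int = demo_dequeue_timer,
};

int main(void)
{
    struct demo_vcpu vcpu = {0};

    demo_ops.queue_timer_int(&vcpu);   /* generic code dispatches here */
    printf("timer pending: %d\n", vcpu.pending_timer);
    return 0;
}
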
vcpu               26 arch/mips/kvm/commpage.c void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
vcpu               28 arch/mips/kvm/commpage.c 	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
vcpu               31 arch/mips/kvm/commpage.c 	vcpu->arch.cop0 = &page->cop0;
vcpu               22 arch/mips/kvm/commpage.h extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
vcpu               30 arch/mips/kvm/dyntrans.c static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
vcpu               38 arch/mips/kvm/dyntrans.c 	kvm_trap_emul_gva_lockless_begin(vcpu);
vcpu               40 arch/mips/kvm/dyntrans.c 	kvm_trap_emul_gva_lockless_end(vcpu);
vcpu               49 arch/mips/kvm/dyntrans.c 		err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
vcpu               69 arch/mips/kvm/dyntrans.c 			       struct kvm_vcpu *vcpu)
vcpu               74 arch/mips/kvm/dyntrans.c 	return kvm_mips_trans_replace(vcpu, opc, nop_inst);
vcpu               82 arch/mips/kvm/dyntrans.c 			    struct kvm_vcpu *vcpu)
vcpu               94 arch/mips/kvm/dyntrans.c 	return kvm_mips_trans_replace(vcpu, opc, synci_inst);
vcpu               98 arch/mips/kvm/dyntrans.c 			struct kvm_vcpu *vcpu)
vcpu              116 arch/mips/kvm/dyntrans.c 		if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
vcpu              121 arch/mips/kvm/dyntrans.c 	return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
vcpu              125 arch/mips/kvm/dyntrans.c 			struct kvm_vcpu *vcpu)
vcpu              138 arch/mips/kvm/dyntrans.c 	if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
vcpu              142 arch/mips/kvm/dyntrans.c 	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
vcpu               41 arch/mips/kvm/emulate.c static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
vcpu               46 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu               57 arch/mips/kvm/emulate.c 	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
vcpu              244 arch/mips/kvm/emulate.c enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
vcpu              249 arch/mips/kvm/emulate.c 		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
vcpu              250 arch/mips/kvm/emulate.c 					     &vcpu->arch.pc);
vcpu              254 arch/mips/kvm/emulate.c 		vcpu->arch.pc += 4;
vcpu              257 arch/mips/kvm/emulate.c 	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
vcpu              273 arch/mips/kvm/emulate.c int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
vcpu              276 arch/mips/kvm/emulate.c 		*out = vcpu->arch.host_cp0_badinstr;
vcpu              279 arch/mips/kvm/emulate.c 		return kvm_get_inst(opc, vcpu, out);
vcpu              294 arch/mips/kvm/emulate.c int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
vcpu              297 arch/mips/kvm/emulate.c 		*out = vcpu->arch.host_cp0_badinstrp;
vcpu              300 arch/mips/kvm/emulate.c 		return kvm_get_inst(opc, vcpu, out);
vcpu              312 arch/mips/kvm/emulate.c int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
vcpu              314 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              316 arch/mips/kvm/emulate.c 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
vcpu              327 arch/mips/kvm/emulate.c static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
vcpu              333 arch/mips/kvm/emulate.c 	delta = now_ns + vcpu->arch.count_dyn_bias;
vcpu              335 arch/mips/kvm/emulate.c 	if (delta >= vcpu->arch.count_period) {
vcpu              337 arch/mips/kvm/emulate.c 		periods = div64_s64(now_ns, vcpu->arch.count_period);
vcpu              338 arch/mips/kvm/emulate.c 		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
vcpu              340 arch/mips/kvm/emulate.c 		delta = now_ns + vcpu->arch.count_dyn_bias;
vcpu              353 arch/mips/kvm/emulate.c 	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
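
kvm_mips_ktime_to_count() above converts a host nanosecond timestamp into a guest CP0 Count value: it adds a dynamic bias to the timestamp, rebasing that bias once per count_period so the subsequent multiply by count_hz cannot overflow 64 bits, then scales by count_hz/NSEC_PER_SEC. kvm_mips_init_count() and kvm_mips_set_count_hz() further down set count_period to (NSEC_PER_SEC << 32) / count_hz, i.e. the time it takes Count to wrap. A standalone sketch of that arithmetic (hypothetical names and frequency, not the kernel code):

/* Minimal, self-contained sketch of the Count derivation visible above.
 * Illustrative only; the in-kernel version works on ktime_t and the
 * per-vcpu state in struct kvm_vcpu_arch. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct demo_count_state {
    uint64_t count_hz;       /* guest Count frequency in Hz */
    uint64_t count_period;   /* ns for Count to wrap: (NSEC_PER_SEC << 32) / count_hz */
    int64_t  count_dyn_bias; /* dynamic bias, rebased once per period */
    uint32_t count_bias;     /* bias so Count resumes at the architected value */
};

static uint32_t demo_ktime_to_count(struct demo_count_state *s, int64_t now_ns)
{
    int64_t delta = now_ns + s->count_dyn_bias;

    if (delta >= (int64_t)s->count_period) {
        /* Rebase so delta * count_hz stays well inside 64 bits. */
        int64_t periods = now_ns / (int64_t)s->count_period;

        s->count_dyn_bias = -periods * (int64_t)s->count_period;
        delta = now_ns + s->count_dyn_bias;
    }
    return (uint32_t)(((uint64_t)delta * s->count_hz) / NSEC_PER_SEC);
}

int main(void)
{
    struct demo_count_state s = { .count_hz = 100000000ULL }; /* 100 MHz, example only */

    s.count_period = (NSEC_PER_SEC << 32) / s.count_hz;
    printf("Count after 1s of guest time: %u\n",
           (unsigned)(s.count_bias + demo_ktime_to_count(&s, (int64_t)NSEC_PER_SEC)));
    return 0;
}
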
vcpu              366 arch/mips/kvm/emulate.c static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
vcpu              368 arch/mips/kvm/emulate.c 	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
vcpu              369 arch/mips/kvm/emulate.c 		return vcpu->arch.count_resume;
vcpu              384 arch/mips/kvm/emulate.c static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
vcpu              386 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              392 arch/mips/kvm/emulate.c 	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
vcpu              408 arch/mips/kvm/emulate.c 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
vcpu              409 arch/mips/kvm/emulate.c 	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
vcpu              415 arch/mips/kvm/emulate.c 		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
vcpu              418 arch/mips/kvm/emulate.c 		kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu              426 arch/mips/kvm/emulate.c 					       vcpu->arch.count_period);
vcpu              427 arch/mips/kvm/emulate.c 			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
vcpu              444 arch/mips/kvm/emulate.c u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
vcpu              446 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              449 arch/mips/kvm/emulate.c 	if (kvm_mips_count_disabled(vcpu))
vcpu              452 arch/mips/kvm/emulate.c 	return kvm_mips_read_count_running(vcpu, ktime_get());
vcpu              471 arch/mips/kvm/emulate.c ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
vcpu              476 arch/mips/kvm/emulate.c 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
vcpu              480 arch/mips/kvm/emulate.c 	*count = kvm_mips_read_count_running(vcpu, now);
vcpu              501 arch/mips/kvm/emulate.c static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
vcpu              504 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              512 arch/mips/kvm/emulate.c 	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
vcpu              516 arch/mips/kvm/emulate.c 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
vcpu              517 arch/mips/kvm/emulate.c 	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
vcpu              542 arch/mips/kvm/emulate.c int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
vcpu              551 arch/mips/kvm/emulate.c 	before_count = vcpu->arch.count_bias +
vcpu              552 arch/mips/kvm/emulate.c 			kvm_mips_ktime_to_count(vcpu, before);
vcpu              564 arch/mips/kvm/emulate.c 		vcpu->arch.count_bias += drift;
vcpu              571 arch/mips/kvm/emulate.c 	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
vcpu              580 arch/mips/kvm/emulate.c 		vcpu->arch.count_bias += drift;
vcpu              587 arch/mips/kvm/emulate.c 	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
vcpu              592 arch/mips/kvm/emulate.c 	kvm_mips_resume_hrtimer(vcpu, count_time, count);
vcpu              603 arch/mips/kvm/emulate.c void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
vcpu              605 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              609 arch/mips/kvm/emulate.c 	now = kvm_mips_count_time(vcpu);
vcpu              610 arch/mips/kvm/emulate.c 	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
vcpu              612 arch/mips/kvm/emulate.c 	if (kvm_mips_count_disabled(vcpu))
vcpu              617 arch/mips/kvm/emulate.c 		kvm_mips_resume_hrtimer(vcpu, now, count);
vcpu              628 arch/mips/kvm/emulate.c void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
vcpu              630 arch/mips/kvm/emulate.c 	vcpu->arch.count_hz = count_hz;
vcpu              631 arch/mips/kvm/emulate.c 	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu              632 arch/mips/kvm/emulate.c 	vcpu->arch.count_dyn_bias = 0;
vcpu              635 arch/mips/kvm/emulate.c 	kvm_mips_write_count(vcpu, 0);
vcpu              649 arch/mips/kvm/emulate.c int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
vcpu              651 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              660 arch/mips/kvm/emulate.c 	if (vcpu->arch.count_hz == count_hz)
vcpu              664 arch/mips/kvm/emulate.c 	dc = kvm_mips_count_disabled(vcpu);
vcpu              666 arch/mips/kvm/emulate.c 		now = kvm_mips_count_time(vcpu);
vcpu              669 arch/mips/kvm/emulate.c 		now = kvm_mips_freeze_hrtimer(vcpu, &count);
vcpu              673 arch/mips/kvm/emulate.c 	vcpu->arch.count_hz = count_hz;
vcpu              674 arch/mips/kvm/emulate.c 	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu              675 arch/mips/kvm/emulate.c 	vcpu->arch.count_dyn_bias = 0;
vcpu              678 arch/mips/kvm/emulate.c 	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
vcpu              682 arch/mips/kvm/emulate.c 		kvm_mips_resume_hrtimer(vcpu, now, count);
vcpu              696 arch/mips/kvm/emulate.c void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
vcpu              698 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              710 arch/mips/kvm/emulate.c 		kvm_mips_callbacks->dequeue_timer_int(vcpu);
vcpu              731 arch/mips/kvm/emulate.c 	dc = kvm_mips_count_disabled(vcpu);
vcpu              733 arch/mips/kvm/emulate.c 		now = kvm_mips_freeze_hrtimer(vcpu, &count);
vcpu              736 arch/mips/kvm/emulate.c 		kvm_mips_callbacks->dequeue_timer_int(vcpu);
vcpu              758 arch/mips/kvm/emulate.c 		kvm_mips_resume_hrtimer(vcpu, now, count);
vcpu              781 arch/mips/kvm/emulate.c static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
vcpu              783 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              788 arch/mips/kvm/emulate.c 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
vcpu              792 arch/mips/kvm/emulate.c 	count = kvm_mips_read_count_running(vcpu, now);
vcpu              808 arch/mips/kvm/emulate.c void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
vcpu              810 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              813 arch/mips/kvm/emulate.c 	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
vcpu              814 arch/mips/kvm/emulate.c 		kvm_mips_count_disable(vcpu);
vcpu              828 arch/mips/kvm/emulate.c void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
vcpu              830 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              841 arch/mips/kvm/emulate.c 	kvm_mips_write_count(vcpu, count);
vcpu              854 arch/mips/kvm/emulate.c int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
vcpu              856 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              857 arch/mips/kvm/emulate.c 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
vcpu              867 arch/mips/kvm/emulate.c 	vcpu->arch.count_ctl = count_ctl;
vcpu              875 arch/mips/kvm/emulate.c 				vcpu->arch.count_resume = ktime_get();
vcpu              878 arch/mips/kvm/emulate.c 			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
vcpu              888 arch/mips/kvm/emulate.c 					vcpu->arch.count_hz);
vcpu              889 arch/mips/kvm/emulate.c 			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
vcpu              895 arch/mips/kvm/emulate.c 				kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu              898 arch/mips/kvm/emulate.c 			count = kvm_mips_read_count_running(vcpu, now);
vcpu              899 arch/mips/kvm/emulate.c 			kvm_mips_resume_hrtimer(vcpu, now, count);
vcpu              916 arch/mips/kvm/emulate.c int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
vcpu              926 arch/mips/kvm/emulate.c 	vcpu->arch.count_resume = ns_to_ktime(count_resume);
vcpu              938 arch/mips/kvm/emulate.c enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
vcpu              941 arch/mips/kvm/emulate.c 	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
vcpu              942 arch/mips/kvm/emulate.c 			       vcpu->arch.count_period);
vcpu              946 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
vcpu              948 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              953 arch/mips/kvm/emulate.c 		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
vcpu              955 arch/mips/kvm/emulate.c 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
vcpu              958 arch/mips/kvm/emulate.c 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
vcpu              962 arch/mips/kvm/emulate.c 			vcpu->arch.pc);
vcpu              969 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
vcpu              971 arch/mips/kvm/emulate.c 	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
vcpu              972 arch/mips/kvm/emulate.c 		  vcpu->arch.pending_exceptions);
vcpu              974 arch/mips/kvm/emulate.c 	++vcpu->stat.wait_exits;
vcpu              975 arch/mips/kvm/emulate.c 	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
vcpu              976 arch/mips/kvm/emulate.c 	if (!vcpu->arch.pending_exceptions) {
vcpu              977 arch/mips/kvm/emulate.c 		kvm_vz_lose_htimer(vcpu);
vcpu              978 arch/mips/kvm/emulate.c 		vcpu->arch.wait = 1;
vcpu              979 arch/mips/kvm/emulate.c 		kvm_vcpu_block(vcpu);
vcpu              985 arch/mips/kvm/emulate.c 		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
vcpu              986 arch/mips/kvm/emulate.c 			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              987 arch/mips/kvm/emulate.c 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
vcpu              994 arch/mips/kvm/emulate.c static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
vcpu              997 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              998 arch/mips/kvm/emulate.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu             1003 arch/mips/kvm/emulate.c 		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
vcpu             1029 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
vcpu             1031 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1033 arch/mips/kvm/emulate.c 	unsigned long pc = vcpu->arch.pc;
vcpu             1043 arch/mips/kvm/emulate.c 	tlb = &vcpu->arch.guest_tlb[index];
vcpu             1047 arch/mips/kvm/emulate.c 	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
vcpu             1060 arch/mips/kvm/emulate.c static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
vcpu             1063 arch/mips/kvm/emulate.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu             1064 arch/mips/kvm/emulate.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu             1080 arch/mips/kvm/emulate.c 	kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
vcpu             1086 arch/mips/kvm/emulate.c 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
vcpu             1102 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
vcpu             1104 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1107 arch/mips/kvm/emulate.c 	unsigned long pc = vcpu->arch.pc;
vcpu             1119 arch/mips/kvm/emulate.c 	tlb = &vcpu->arch.guest_tlb[index];
vcpu             1121 arch/mips/kvm/emulate.c 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
vcpu             1138 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
vcpu             1140 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1142 arch/mips/kvm/emulate.c 	unsigned long pc = vcpu->arch.pc;
vcpu             1146 arch/mips/kvm/emulate.c 	tlb = &vcpu->arch.guest_tlb[index];
vcpu             1148 arch/mips/kvm/emulate.c 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
vcpu             1163 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
vcpu             1165 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1167 arch/mips/kvm/emulate.c 	unsigned long pc = vcpu->arch.pc;
vcpu             1170 arch/mips/kvm/emulate.c 	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
vcpu             1187 arch/mips/kvm/emulate.c unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
vcpu             1192 arch/mips/kvm/emulate.c 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
vcpu             1205 arch/mips/kvm/emulate.c unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
vcpu             1211 arch/mips/kvm/emulate.c 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
vcpu             1224 arch/mips/kvm/emulate.c unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
vcpu             1242 arch/mips/kvm/emulate.c unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
vcpu             1247 arch/mips/kvm/emulate.c 	if (kvm_mips_guest_has_msa(&vcpu->arch))
vcpu             1254 arch/mips/kvm/emulate.c 	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
vcpu             1266 arch/mips/kvm/emulate.c 					   struct kvm_vcpu *vcpu)
vcpu             1268 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1277 arch/mips/kvm/emulate.c 	curr_pc = vcpu->arch.pc;
vcpu             1278 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
vcpu             1285 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbr(vcpu);
vcpu             1288 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbwi(vcpu);
vcpu             1291 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbwr(vcpu);
vcpu             1294 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_tlbp(vcpu);
vcpu             1300 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_eret(vcpu);
vcpu             1303 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_wait(vcpu);
vcpu             1306 arch/mips/kvm/emulate.c 			er = kvm_mips_emul_hypcall(vcpu, inst);
vcpu             1321 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] =
vcpu             1322 arch/mips/kvm/emulate.c 				    (s32)kvm_mips_read_count(vcpu);
vcpu             1324 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] = 0x0;
vcpu             1326 arch/mips/kvm/emulate.c 				kvm_mips_trans_mfc0(inst, opc, vcpu);
vcpu             1329 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
vcpu             1332 arch/mips/kvm/emulate.c 				kvm_mips_trans_mfc0(inst, opc, vcpu);
vcpu             1336 arch/mips/kvm/emulate.c 			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
vcpu             1338 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
vcpu             1342 arch/mips/kvm/emulate.c 			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
vcpu             1344 arch/mips/kvm/emulate.c 			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
vcpu             1346 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
vcpu             1353 arch/mips/kvm/emulate.c 			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
vcpu             1355 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
vcpu             1358 arch/mips/kvm/emulate.c 			    && (vcpu->arch.gprs[rt] >=
vcpu             1361 arch/mips/kvm/emulate.c 					vcpu->arch.gprs[rt]);
vcpu             1371 arch/mips/kvm/emulate.c 							  vcpu->arch.gprs[rt]);
vcpu             1373 arch/mips/kvm/emulate.c 				kvm_mips_change_entryhi(vcpu,
vcpu             1374 arch/mips/kvm/emulate.c 							vcpu->arch.gprs[rt]);
vcpu             1378 arch/mips/kvm/emulate.c 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
vcpu             1383 arch/mips/kvm/emulate.c 				kvm_mips_write_compare(vcpu,
vcpu             1384 arch/mips/kvm/emulate.c 						       vcpu->arch.gprs[rt],
vcpu             1390 arch/mips/kvm/emulate.c 				val = vcpu->arch.gprs[rt];
vcpu             1401 arch/mips/kvm/emulate.c 				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu             1421 arch/mips/kvm/emulate.c 					kvm_drop_fpu(vcpu);
vcpu             1432 arch/mips/kvm/emulate.c 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
vcpu             1433 arch/mips/kvm/emulate.c 					kvm_lose_fpu(vcpu);
vcpu             1443 arch/mips/kvm/emulate.c 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
vcpu             1455 arch/mips/kvm/emulate.c 				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu             1456 arch/mips/kvm/emulate.c 					kvm_mips_trans_mtc0(inst, opc, vcpu);
vcpu             1462 arch/mips/kvm/emulate.c 				val = vcpu->arch.gprs[rt];
vcpu             1465 arch/mips/kvm/emulate.c 				wrmask = kvm_mips_config5_wrmask(vcpu);
vcpu             1478 arch/mips/kvm/emulate.c 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
vcpu             1488 arch/mips/kvm/emulate.c 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
vcpu             1499 arch/mips/kvm/emulate.c 				new_cause = vcpu->arch.gprs[rt];
vcpu             1506 arch/mips/kvm/emulate.c 						kvm_mips_count_disable_cause(vcpu);
vcpu             1508 arch/mips/kvm/emulate.c 						kvm_mips_count_enable_cause(vcpu);
vcpu             1519 arch/mips/kvm/emulate.c 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
vcpu             1521 arch/mips/kvm/emulate.c 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
vcpu             1523 arch/mips/kvm/emulate.c 				kvm_mips_trans_mtc0(inst, opc, vcpu);
vcpu             1530 arch/mips/kvm/emulate.c 				vcpu->arch.pc, rt, rd, sel);
vcpu             1531 arch/mips/kvm/emulate.c 			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
vcpu             1533 arch/mips/kvm/emulate.c 				      vcpu->arch.gprs[rt]);
vcpu             1542 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rt] =
vcpu             1547 arch/mips/kvm/emulate.c 					  vcpu->arch.pc);
vcpu             1551 arch/mips/kvm/emulate.c 					  vcpu->arch.pc);
vcpu             1571 arch/mips/kvm/emulate.c 					  vcpu->arch.gprs[rt]);
vcpu             1572 arch/mips/kvm/emulate.c 				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
vcpu             1577 arch/mips/kvm/emulate.c 				vcpu->arch.pc, inst.c0r_format.rs);
vcpu             1586 arch/mips/kvm/emulate.c 		vcpu->arch.pc = curr_pc;
vcpu             1601 arch/mips/kvm/emulate.c 					     struct kvm_vcpu *vcpu)
vcpu             1612 arch/mips/kvm/emulate.c 	curr_pc = vcpu->arch.pc;
vcpu             1613 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
vcpu             1620 arch/mips/kvm/emulate.c 						vcpu->arch.host_cp0_badvaddr);
vcpu             1628 arch/mips/kvm/emulate.c 		*(u64 *)data = vcpu->arch.gprs[rt];
vcpu             1631 arch/mips/kvm/emulate.c 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu             1632 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u64 *)data);
vcpu             1638 arch/mips/kvm/emulate.c 		*(u32 *)data = vcpu->arch.gprs[rt];
vcpu             1641 arch/mips/kvm/emulate.c 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu             1642 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u32 *)data);
vcpu             1647 arch/mips/kvm/emulate.c 		*(u16 *)data = vcpu->arch.gprs[rt];
vcpu             1650 arch/mips/kvm/emulate.c 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu             1651 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u16 *)data);
vcpu             1656 arch/mips/kvm/emulate.c 		*(u8 *)data = vcpu->arch.gprs[rt];
vcpu             1659 arch/mips/kvm/emulate.c 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu             1660 arch/mips/kvm/emulate.c 			  vcpu->arch.gprs[rt], *(u8 *)data);
vcpu             1670 arch/mips/kvm/emulate.c 	vcpu->mmio_needed = 1;
vcpu             1671 arch/mips/kvm/emulate.c 	vcpu->mmio_is_write = 1;
vcpu             1676 arch/mips/kvm/emulate.c 	vcpu->arch.pc = curr_pc;
vcpu             1682 arch/mips/kvm/emulate.c 					    struct kvm_vcpu *vcpu)
vcpu             1696 arch/mips/kvm/emulate.c 	curr_pc = vcpu->arch.pc;
vcpu             1697 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
vcpu             1700 arch/mips/kvm/emulate.c 	vcpu->arch.io_pc = vcpu->arch.pc;
vcpu             1701 arch/mips/kvm/emulate.c 	vcpu->arch.pc = curr_pc;
vcpu             1703 arch/mips/kvm/emulate.c 	vcpu->arch.io_gpr = rt;
vcpu             1706 arch/mips/kvm/emulate.c 						vcpu->arch.host_cp0_badvaddr);
vcpu             1710 arch/mips/kvm/emulate.c 	vcpu->mmio_needed = 2;	/* signed */
vcpu             1718 arch/mips/kvm/emulate.c 		vcpu->mmio_needed = 1;	/* unsigned */
vcpu             1726 arch/mips/kvm/emulate.c 		vcpu->mmio_needed = 1;	/* unsigned */
vcpu             1733 arch/mips/kvm/emulate.c 		vcpu->mmio_needed = 1;	/* unsigned */
vcpu             1742 arch/mips/kvm/emulate.c 		vcpu->mmio_needed = 0;
vcpu             1747 arch/mips/kvm/emulate.c 	vcpu->mmio_is_write = 0;
vcpu             1756 arch/mips/kvm/emulate.c 						     struct kvm_vcpu *vcpu,
vcpu             1763 arch/mips/kvm/emulate.c 		kvm_trap_emul_gva_lockless_begin(vcpu);
vcpu             1765 arch/mips/kvm/emulate.c 		kvm_trap_emul_gva_lockless_end(vcpu);
vcpu             1774 arch/mips/kvm/emulate.c 		switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
vcpu             1781 arch/mips/kvm/emulate.c 			vcpu->arch.host_cp0_badvaddr = addr;
vcpu             1782 arch/mips/kvm/emulate.c 			vcpu->arch.pc = curr_pc;
vcpu             1783 arch/mips/kvm/emulate.c 			kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
vcpu             1787 arch/mips/kvm/emulate.c 			vcpu->arch.host_cp0_badvaddr = addr;
vcpu             1788 arch/mips/kvm/emulate.c 			vcpu->arch.pc = curr_pc;
vcpu             1789 arch/mips/kvm/emulate.c 			kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
vcpu             1800 arch/mips/kvm/emulate.c 					     struct kvm_vcpu *vcpu)
vcpu             1805 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             1813 arch/mips/kvm/emulate.c 	curr_pc = vcpu->arch.pc;
vcpu             1814 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
vcpu             1839 arch/mips/kvm/emulate.c 			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
vcpu             1877 arch/mips/kvm/emulate.c 		kvm_mips_trans_cache_index(inst, opc, vcpu);
vcpu             1889 arch/mips/kvm/emulate.c 					     curr_pc, va, run, vcpu, cause);
vcpu             1897 arch/mips/kvm/emulate.c 		kvm_mips_trans_cache_va(inst, opc, vcpu);
vcpu             1902 arch/mips/kvm/emulate.c 					     curr_pc, va, run, vcpu, cause);
vcpu             1906 arch/mips/kvm/emulate.c 					     curr_pc, va, run, vcpu, cause);
vcpu             1912 arch/mips/kvm/emulate.c 		kvm_mips_trans_cache_va(inst, opc, vcpu);
vcpu             1923 arch/mips/kvm/emulate.c 		vcpu->arch.pc = curr_pc;
vcpu             1933 arch/mips/kvm/emulate.c 					    struct kvm_vcpu *vcpu)
vcpu             1942 arch/mips/kvm/emulate.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1948 arch/mips/kvm/emulate.c 		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
vcpu             1953 arch/mips/kvm/emulate.c 		++vcpu->stat.cache_exits;
vcpu             1954 arch/mips/kvm/emulate.c 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
vcpu             1955 arch/mips/kvm/emulate.c 		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
vcpu             1961 arch/mips/kvm/emulate.c 			++vcpu->stat.cache_exits;
vcpu             1962 arch/mips/kvm/emulate.c 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
vcpu             1964 arch/mips/kvm/emulate.c 						    vcpu);
vcpu             1976 arch/mips/kvm/emulate.c 		kvm_arch_vcpu_dump_regs(vcpu);
vcpu             1991 arch/mips/kvm/emulate.c long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
vcpu             1993 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2004 arch/mips/kvm/emulate.c 					       struct kvm_vcpu *vcpu)
vcpu             2006 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2007 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2026 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2039 arch/mips/kvm/emulate.c 						  struct kvm_vcpu *vcpu)
vcpu             2041 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2042 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2043 arch/mips/kvm/emulate.c 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
vcpu             2060 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
vcpu             2066 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2073 arch/mips/kvm/emulate.c 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2083 arch/mips/kvm/emulate.c 						 struct kvm_vcpu *vcpu)
vcpu             2085 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2086 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2088 arch/mips/kvm/emulate.c 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
vcpu             2109 arch/mips/kvm/emulate.c 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2115 arch/mips/kvm/emulate.c 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2125 arch/mips/kvm/emulate.c 						  struct kvm_vcpu *vcpu)
vcpu             2127 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2128 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2129 arch/mips/kvm/emulate.c 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
vcpu             2146 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
vcpu             2150 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2157 arch/mips/kvm/emulate.c 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2167 arch/mips/kvm/emulate.c 						 struct kvm_vcpu *vcpu)
vcpu             2169 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2170 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2171 arch/mips/kvm/emulate.c 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
vcpu             2192 arch/mips/kvm/emulate.c 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2198 arch/mips/kvm/emulate.c 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2208 arch/mips/kvm/emulate.c 					      struct kvm_vcpu *vcpu)
vcpu             2210 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2211 arch/mips/kvm/emulate.c 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
vcpu             2213 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2232 arch/mips/kvm/emulate.c 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2238 arch/mips/kvm/emulate.c 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2248 arch/mips/kvm/emulate.c 					       struct kvm_vcpu *vcpu)
vcpu             2250 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2251 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2265 arch/mips/kvm/emulate.c 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2277 arch/mips/kvm/emulate.c 					      struct kvm_vcpu *vcpu)
vcpu             2279 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2280 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2299 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2312 arch/mips/kvm/emulate.c 					      struct kvm_vcpu *vcpu)
vcpu             2314 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2315 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2334 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2347 arch/mips/kvm/emulate.c 						struct kvm_vcpu *vcpu)
vcpu             2349 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2350 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2369 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2382 arch/mips/kvm/emulate.c 						  struct kvm_vcpu *vcpu)
vcpu             2384 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2385 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2404 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2417 arch/mips/kvm/emulate.c 					       struct kvm_vcpu *vcpu)
vcpu             2419 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2420 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2439 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2452 arch/mips/kvm/emulate.c 						  struct kvm_vcpu *vcpu)
vcpu             2454 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2455 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2474 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2486 arch/mips/kvm/emulate.c 					 struct kvm_vcpu *vcpu)
vcpu             2488 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2489 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2499 arch/mips/kvm/emulate.c 	curr_pc = vcpu->arch.pc;
vcpu             2500 arch/mips/kvm/emulate.c 	er = update_pc(vcpu, cause);
vcpu             2507 arch/mips/kvm/emulate.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             2517 arch/mips/kvm/emulate.c 		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
vcpu             2530 arch/mips/kvm/emulate.c 			arch->gprs[rt] = vcpu->vcpu_id;
vcpu             2537 arch/mips/kvm/emulate.c 			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
vcpu             2558 arch/mips/kvm/emulate.c 		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
vcpu             2559 arch/mips/kvm/emulate.c 			      vcpu->arch.gprs[rt]);
vcpu             2573 arch/mips/kvm/emulate.c 	vcpu->arch.pc = curr_pc;
vcpu             2574 arch/mips/kvm/emulate.c 	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
vcpu             2577 arch/mips/kvm/emulate.c enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu             2580 arch/mips/kvm/emulate.c 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
vcpu             2590 arch/mips/kvm/emulate.c 	vcpu->arch.pc = vcpu->arch.io_pc;
vcpu             2598 arch/mips/kvm/emulate.c 		if (vcpu->mmio_needed == 2)
vcpu             2605 arch/mips/kvm/emulate.c 		if (vcpu->mmio_needed == 2)
vcpu             2612 arch/mips/kvm/emulate.c 		if (vcpu->mmio_needed == 2)
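
The MMIO paths above defer the architectural effect of a guest load until userspace has performed the access: the emulation records the destination register in io_gpr and the resume address in io_pc, and marks mmio_needed as 2 for a sign-extending load or 1 for an unsigned one; kvm_mips_complete_mmio_load() then extends the returned data accordingly and writes it into the saved GPR. A small, self-contained sketch of the extension step (illustrative only, not the kernel code):

/* Hypothetical sketch of the extension performed when an emulated MMIO
 * load completes: mmio_needed == 2 means sign-extend, 1 means zero-extend,
 * mirroring the markings in the load emulation above. */
#include <stdint.h>
#include <stdio.h>

static unsigned long complete_load(const void *data, int len, int mmio_needed)
{
    unsigned long gpr = 0;

    switch (len) {
    case 4:
        gpr = (mmio_needed == 2) ? (unsigned long)(long)(int32_t)*(const uint32_t *)data
                                 : *(const uint32_t *)data;
        break;
    case 2:
        gpr = (mmio_needed == 2) ? (unsigned long)(long)(int16_t)*(const uint16_t *)data
                                 : *(const uint16_t *)data;
        break;
    case 1:
        gpr = (mmio_needed == 2) ? (unsigned long)(long)(int8_t)*(const uint8_t *)data
                                 : *(const uint8_t *)data;
        break;
    }
    return gpr;
}

int main(void)
{
    uint16_t raw = 0x8000;

    printf("lh  -> %#lx\n", complete_load(&raw, 2, 2)); /* sign-extended */
    printf("lhu -> %#lx\n", complete_load(&raw, 2, 1)); /* zero-extended */
    return 0;
}
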
vcpu             2626 arch/mips/kvm/emulate.c 						  struct kvm_vcpu *vcpu)
vcpu             2629 arch/mips/kvm/emulate.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2630 arch/mips/kvm/emulate.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             2647 arch/mips/kvm/emulate.c 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
vcpu             2648 arch/mips/kvm/emulate.c 		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
vcpu             2664 arch/mips/kvm/emulate.c 					       struct kvm_vcpu *vcpu)
vcpu             2668 arch/mips/kvm/emulate.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu             2670 arch/mips/kvm/emulate.c 	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
vcpu             2745 arch/mips/kvm/emulate.c 		kvm_mips_emulate_exc(cause, opc, run, vcpu);
vcpu             2760 arch/mips/kvm/emulate.c 					      struct kvm_vcpu *vcpu,
vcpu             2765 arch/mips/kvm/emulate.c 	unsigned long va = vcpu->arch.host_cp0_badvaddr;
vcpu             2769 arch/mips/kvm/emulate.c 		  vcpu->arch.host_cp0_badvaddr);
vcpu             2777 arch/mips/kvm/emulate.c 	index = kvm_mips_guest_tlb_lookup(vcpu,
vcpu             2779 arch/mips/kvm/emulate.c 		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
vcpu             2783 arch/mips/kvm/emulate.c 			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
vcpu             2785 arch/mips/kvm/emulate.c 			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
vcpu             2792 arch/mips/kvm/emulate.c 		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
vcpu             2801 arch/mips/kvm/emulate.c 								vcpu);
vcpu             2804 arch/mips/kvm/emulate.c 								vcpu);
vcpu             2817 arch/mips/kvm/emulate.c 			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
vcpu             2820 arch/mips/kvm/emulate.c 					__func__, va, index, vcpu,
vcpu               17 arch/mips/kvm/hypcall.c enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
vcpu               22 arch/mips/kvm/hypcall.c 	kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);
vcpu               32 arch/mips/kvm/hypcall.c static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
vcpu               40 arch/mips/kvm/hypcall.c int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
vcpu               45 arch/mips/kvm/hypcall.c 	num = vcpu->arch.gprs[2];	/* v0 */
vcpu               46 arch/mips/kvm/hypcall.c 	args[0] = vcpu->arch.gprs[4];	/* a0 */
vcpu               47 arch/mips/kvm/hypcall.c 	args[1] = vcpu->arch.gprs[5];	/* a1 */
vcpu               48 arch/mips/kvm/hypcall.c 	args[2] = vcpu->arch.gprs[6];	/* a2 */
vcpu               49 arch/mips/kvm/hypcall.c 	args[3] = vcpu->arch.gprs[7];	/* a3 */
vcpu               51 arch/mips/kvm/hypcall.c 	return kvm_mips_hypercall(vcpu, num,
vcpu               52 arch/mips/kvm/hypcall.c 				  args, &vcpu->arch.gprs[2] /* v0 */);
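
kvm_mips_handle_hypcall() above shows the hypercall register convention: the guest places the call number in v0 (GPR 2) and up to four arguments in a0-a3 (GPRs 4-7), and the result is written back into v0. A hedged, self-contained sketch of that marshalling (hypothetical handler, not the in-kernel dispatcher):

/* Hypothetical sketch of the register convention visible above.  The real
 * kvm_mips_hypercall() dispatches on 'num' and returns a resume code; here
 * the handler just echoes the first argument. */
#include <stdio.h>

#define GPR_V0 2
#define GPR_A0 4

static int demo_hypercall(unsigned long num, const unsigned long args[4],
                          unsigned long *ret)
{
    (void)num;
    *ret = args[0];   /* result goes back to the guest in v0 */
    return 0;
}

int main(void)
{
    unsigned long gprs[32] = {0};
    unsigned long args[4];

    gprs[GPR_V0] = 1;              /* hypercall number in v0 */
    gprs[GPR_A0] = 0xdeadbeef;     /* first argument in a0 */

    for (int i = 0; i < 4; i++)
        args[i] = gprs[GPR_A0 + i];   /* a0..a3 */

    demo_hypercall(gprs[GPR_V0], args, &gprs[GPR_V0]);
    printf("v0 = %#lx\n", gprs[GPR_V0]);
    return 0;
}
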
vcpu               24 arch/mips/kvm/interrupt.c void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
vcpu               26 arch/mips/kvm/interrupt.c 	set_bit(priority, &vcpu->arch.pending_exceptions);
vcpu               29 arch/mips/kvm/interrupt.c void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
vcpu               31 arch/mips/kvm/interrupt.c 	clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu               34 arch/mips/kvm/interrupt.c void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
vcpu               41 arch/mips/kvm/interrupt.c 	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
vcpu               44 arch/mips/kvm/interrupt.c 	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu               48 arch/mips/kvm/interrupt.c void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
vcpu               50 arch/mips/kvm/interrupt.c 	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
vcpu               51 arch/mips/kvm/interrupt.c 	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu               54 arch/mips/kvm/interrupt.c void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu               66 arch/mips/kvm/interrupt.c 		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
vcpu               68 arch/mips/kvm/interrupt.c 		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
vcpu               72 arch/mips/kvm/interrupt.c 		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
vcpu               73 arch/mips/kvm/interrupt.c 		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
vcpu               77 arch/mips/kvm/interrupt.c 		kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
vcpu               78 arch/mips/kvm/interrupt.c 		kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
vcpu               87 arch/mips/kvm/interrupt.c void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu               94 arch/mips/kvm/interrupt.c 		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
vcpu               95 arch/mips/kvm/interrupt.c 		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
vcpu               99 arch/mips/kvm/interrupt.c 		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
vcpu              100 arch/mips/kvm/interrupt.c 		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
vcpu              104 arch/mips/kvm/interrupt.c 		kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
vcpu              105 arch/mips/kvm/interrupt.c 		kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
vcpu              115 arch/mips/kvm/interrupt.c int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              121 arch/mips/kvm/interrupt.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu              122 arch/mips/kvm/interrupt.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              186 arch/mips/kvm/interrupt.c 		arch->pc = kvm_mips_guest_exception_base(vcpu);
vcpu              192 arch/mips/kvm/interrupt.c 		clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              198 arch/mips/kvm/interrupt.c int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              204 arch/mips/kvm/interrupt.c void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
vcpu              206 arch/mips/kvm/interrupt.c 	unsigned long *pending = &vcpu->arch.pending_exceptions;
vcpu              207 arch/mips/kvm/interrupt.c 	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
vcpu              215 arch/mips/kvm/interrupt.c 		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
vcpu              227 arch/mips/kvm/interrupt.c 		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
vcpu              239 arch/mips/kvm/interrupt.c int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
vcpu              241 arch/mips/kvm/interrupt.c 	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
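
The interrupt.c entries above queue and dequeue guest interrupts by setting and clearing per-priority bits in vcpu->arch.pending_exceptions while mirroring them into the guest Cause register, and kvm_mips_deliver_interrupts() later walks those bits through the irq_deliver/irq_clear callbacks. A simplified standalone sketch of the bitmask bookkeeping (illustrative names only):

/* Simplified analogue of kvm_mips_queue_irq()/kvm_mips_dequeue_irq():
 * each interrupt source owns one bit of a pending mask.  Not kernel code. */
#include <stdio.h>

enum { DEMO_INT_TIMER = 1, DEMO_INT_IO = 2, DEMO_INT_IPI_1 = 3 };

static unsigned long pending;

static void demo_queue_irq(unsigned int priority)   { pending |=  (1UL << priority); }
static void demo_dequeue_irq(unsigned int priority) { pending &= ~(1UL << priority); }

int main(void)
{
    demo_queue_irq(DEMO_INT_TIMER);
    demo_queue_irq(DEMO_INT_IO);
    demo_dequeue_irq(DEMO_INT_IO);
    printf("pending mask: %#lx (timer still set: %d)\n",
           pending, !!(pending & (1UL << DEMO_INT_TIMER)));
    return 0;
}
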
vcpu               41 arch/mips/kvm/interrupt.h void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
vcpu               42 arch/mips/kvm/interrupt.h void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
vcpu               43 arch/mips/kvm/interrupt.h int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
vcpu               45 arch/mips/kvm/interrupt.h void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
vcpu               46 arch/mips/kvm/interrupt.h void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
vcpu               47 arch/mips/kvm/interrupt.h void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu               49 arch/mips/kvm/interrupt.h void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu               51 arch/mips/kvm/interrupt.h int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu               53 arch/mips/kvm/interrupt.h int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu               55 arch/mips/kvm/interrupt.h void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
vcpu               96 arch/mips/kvm/mips.c int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
vcpu               98 arch/mips/kvm/mips.c 	return !!(vcpu->arch.pending_exceptions);
vcpu              101 arch/mips/kvm/mips.c bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
vcpu              106 arch/mips/kvm/mips.c int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
vcpu              156 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu;
vcpu              158 arch/mips/kvm/mips.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              159 arch/mips/kvm/mips.c 		kvm_arch_vcpu_free(vcpu);
vcpu              289 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
vcpu              291 arch/mips/kvm/mips.c 	if (!vcpu) {
vcpu              296 arch/mips/kvm/mips.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu              301 arch/mips/kvm/mips.c 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
vcpu              334 arch/mips/kvm/mips.c 	vcpu->arch.guest_ebase = gebase;
vcpu              361 arch/mips/kvm/mips.c 	vcpu->arch.vcpu_run = p;
vcpu              368 arch/mips/kvm/mips.c 	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
vcpu              371 arch/mips/kvm/mips.c 	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
vcpu              381 arch/mips/kvm/mips.c 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
vcpu              383 arch/mips/kvm/mips.c 	if (!vcpu->arch.kseg0_commpage) {
vcpu              388 arch/mips/kvm/mips.c 	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
vcpu              389 arch/mips/kvm/mips.c 	kvm_mips_commpage_init(vcpu);
vcpu              392 arch/mips/kvm/mips.c 	vcpu->arch.last_sched_cpu = -1;
vcpu              393 arch/mips/kvm/mips.c 	vcpu->arch.last_exec_cpu = -1;
vcpu              395 arch/mips/kvm/mips.c 	return vcpu;
vcpu              401 arch/mips/kvm/mips.c 	kvm_vcpu_uninit(vcpu);
vcpu              404 arch/mips/kvm/mips.c 	kfree(vcpu);
vcpu              410 arch/mips/kvm/mips.c void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
vcpu              412 arch/mips/kvm/mips.c 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
vcpu              414 arch/mips/kvm/mips.c 	kvm_vcpu_uninit(vcpu);
vcpu              416 arch/mips/kvm/mips.c 	kvm_mips_dump_stats(vcpu);
vcpu              418 arch/mips/kvm/mips.c 	kvm_mmu_free_memory_caches(vcpu);
vcpu              419 arch/mips/kvm/mips.c 	kfree(vcpu->arch.guest_ebase);
vcpu              420 arch/mips/kvm/mips.c 	kfree(vcpu->arch.kseg0_commpage);
vcpu              421 arch/mips/kvm/mips.c 	kfree(vcpu);
vcpu              424 arch/mips/kvm/mips.c void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu              426 arch/mips/kvm/mips.c 	kvm_arch_vcpu_free(vcpu);
vcpu              429 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu              435 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              439 arch/mips/kvm/mips.c 	vcpu_load(vcpu);
vcpu              441 arch/mips/kvm/mips.c 	kvm_sigset_activate(vcpu);
vcpu              443 arch/mips/kvm/mips.c 	if (vcpu->mmio_needed) {
vcpu              444 arch/mips/kvm/mips.c 		if (!vcpu->mmio_is_write)
vcpu              445 arch/mips/kvm/mips.c 			kvm_mips_complete_mmio_load(vcpu, run);
vcpu              446 arch/mips/kvm/mips.c 		vcpu->mmio_needed = 0;
vcpu              456 arch/mips/kvm/mips.c 	trace_kvm_enter(vcpu);
vcpu              464 arch/mips/kvm/mips.c 	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
vcpu              466 arch/mips/kvm/mips.c 	r = kvm_mips_callbacks->vcpu_run(run, vcpu);
vcpu              468 arch/mips/kvm/mips.c 	trace_kvm_out(vcpu);
vcpu              473 arch/mips/kvm/mips.c 	kvm_sigset_deactivate(vcpu);
vcpu              475 arch/mips/kvm/mips.c 	vcpu_put(vcpu);
vcpu              479 arch/mips/kvm/mips.c int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
vcpu              490 arch/mips/kvm/mips.c 		dvcpu = vcpu;
vcpu              492 arch/mips/kvm/mips.c 		dvcpu = vcpu->kvm->vcpus[irq->cpu];
vcpu              513 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu              519 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu              576 arch/mips/kvm/mips.c static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
vcpu              581 arch/mips/kvm/mips.c 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
vcpu              587 arch/mips/kvm/mips.c 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
vcpu              589 arch/mips/kvm/mips.c 	ret += kvm_mips_callbacks->num_regs(vcpu);
vcpu              594 arch/mips/kvm/mips.c static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
vcpu              604 arch/mips/kvm/mips.c 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
vcpu              627 arch/mips/kvm/mips.c 	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
vcpu              641 arch/mips/kvm/mips.c 	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
vcpu              644 arch/mips/kvm/mips.c static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
vcpu              647 arch/mips/kvm/mips.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              648 arch/mips/kvm/mips.c 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
vcpu              657 arch/mips/kvm/mips.c 		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
vcpu              661 arch/mips/kvm/mips.c 		v = (long)vcpu->arch.hi;
vcpu              664 arch/mips/kvm/mips.c 		v = (long)vcpu->arch.lo;
vcpu              668 arch/mips/kvm/mips.c 		v = (long)vcpu->arch.pc;
vcpu              673 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              683 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              692 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              697 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              704 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              721 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              726 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              733 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
vcpu              756 arch/mips/kvm/mips.c static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
vcpu              759 arch/mips/kvm/mips.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              760 arch/mips/kvm/mips.c 	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
vcpu              791 arch/mips/kvm/mips.c 		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
vcpu              795 arch/mips/kvm/mips.c 		vcpu->arch.hi = v;
vcpu              798 arch/mips/kvm/mips.c 		vcpu->arch.lo = v;
vcpu              802 arch/mips/kvm/mips.c 		vcpu->arch.pc = v;
vcpu              807 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              817 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              826 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              831 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu              838 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              852 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              857 arch/mips/kvm/mips.c 		if (!kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              864 arch/mips/kvm/mips.c 		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
vcpu              869 arch/mips/kvm/mips.c static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
vcpu              874 arch/mips/kvm/mips.c 	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
vcpu              883 arch/mips/kvm/mips.c 		vcpu->arch.fpu_enabled = true;
vcpu              886 arch/mips/kvm/mips.c 		vcpu->arch.msa_enabled = true;
vcpu              899 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu              907 arch/mips/kvm/mips.c 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
vcpu              910 arch/mips/kvm/mips.c 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
vcpu              919 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu              923 arch/mips/kvm/mips.c 	vcpu_load(vcpu);
vcpu              934 arch/mips/kvm/mips.c 			r = kvm_mips_set_reg(vcpu, &reg);
vcpu              936 arch/mips/kvm/mips.c 			r = kvm_mips_get_reg(vcpu, &reg);
vcpu              948 arch/mips/kvm/mips.c 		reg_list.n = kvm_mips_num_regs(vcpu);
vcpu              954 arch/mips/kvm/mips.c 		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
vcpu              963 arch/mips/kvm/mips.c 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu              970 arch/mips/kvm/mips.c 	vcpu_put(vcpu);
vcpu             1066 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu             1072 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu             1078 arch/mips/kvm/mips.c void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu             1082 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             1087 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             1092 arch/mips/kvm/mips.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vcpu             1144 arch/mips/kvm/mips.c int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu             1146 arch/mips/kvm/mips.c 	return kvm_mips_pending_timer(vcpu) ||
vcpu             1147 arch/mips/kvm/mips.c 		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
vcpu             1150 arch/mips/kvm/mips.c int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
vcpu             1155 arch/mips/kvm/mips.c 	if (!vcpu)
vcpu             1159 arch/mips/kvm/mips.c 	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
vcpu             1160 arch/mips/kvm/mips.c 	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
vcpu             1164 arch/mips/kvm/mips.c 		       vcpu->arch.gprs[i],
vcpu             1165 arch/mips/kvm/mips.c 		       vcpu->arch.gprs[i + 1],
vcpu             1166 arch/mips/kvm/mips.c 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
vcpu             1168 arch/mips/kvm/mips.c 	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
vcpu             1169 arch/mips/kvm/mips.c 	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
vcpu             1171 arch/mips/kvm/mips.c 	cop0 = vcpu->arch.cop0;
vcpu             1181 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             1185 arch/mips/kvm/mips.c 	vcpu_load(vcpu);
vcpu             1187 arch/mips/kvm/mips.c 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
vcpu             1188 arch/mips/kvm/mips.c 		vcpu->arch.gprs[i] = regs->gpr[i];
vcpu             1189 arch/mips/kvm/mips.c 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
vcpu             1190 arch/mips/kvm/mips.c 	vcpu->arch.hi = regs->hi;
vcpu             1191 arch/mips/kvm/mips.c 	vcpu->arch.lo = regs->lo;
vcpu             1192 arch/mips/kvm/mips.c 	vcpu->arch.pc = regs->pc;
vcpu             1194 arch/mips/kvm/mips.c 	vcpu_put(vcpu);
vcpu             1198 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             1202 arch/mips/kvm/mips.c 	vcpu_load(vcpu);
vcpu             1204 arch/mips/kvm/mips.c 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
vcpu             1205 arch/mips/kvm/mips.c 		regs->gpr[i] = vcpu->arch.gprs[i];
vcpu             1207 arch/mips/kvm/mips.c 	regs->hi = vcpu->arch.hi;
vcpu             1208 arch/mips/kvm/mips.c 	regs->lo = vcpu->arch.lo;
vcpu             1209 arch/mips/kvm/mips.c 	regs->pc = vcpu->arch.pc;
vcpu             1211 arch/mips/kvm/mips.c 	vcpu_put(vcpu);
vcpu             1217 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
vcpu             1219 arch/mips/kvm/mips.c 	kvm_mips_callbacks->queue_timer_int(vcpu);
vcpu             1221 arch/mips/kvm/mips.c 	vcpu->arch.wait = 0;
vcpu             1222 arch/mips/kvm/mips.c 	if (swq_has_sleeper(&vcpu->wq))
vcpu             1223 arch/mips/kvm/mips.c 		swake_up_one(&vcpu->wq);
vcpu             1229 arch/mips/kvm/mips.c 	struct kvm_vcpu *vcpu;
vcpu             1231 arch/mips/kvm/mips.c 	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
vcpu             1232 arch/mips/kvm/mips.c 	kvm_mips_comparecount_func((unsigned long) vcpu);
vcpu             1233 arch/mips/kvm/mips.c 	return kvm_mips_count_timeout(vcpu);
vcpu             1236 arch/mips/kvm/mips.c int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             1240 arch/mips/kvm/mips.c 	err = kvm_mips_callbacks->vcpu_init(vcpu);
vcpu             1244 arch/mips/kvm/mips.c 	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
vcpu             1246 arch/mips/kvm/mips.c 	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
vcpu             1250 arch/mips/kvm/mips.c void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu             1252 arch/mips/kvm/mips.c 	kvm_mips_callbacks->vcpu_uninit(vcpu);
vcpu             1255 arch/mips/kvm/mips.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu             1262 arch/mips/kvm/mips.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu             1264 arch/mips/kvm/mips.c 	return kvm_mips_callbacks->vcpu_setup(vcpu);
vcpu             1281 arch/mips/kvm/mips.c int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             1283 arch/mips/kvm/mips.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu             1285 arch/mips/kvm/mips.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu             1286 arch/mips/kvm/mips.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu             1291 arch/mips/kvm/mips.c 	vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu             1310 arch/mips/kvm/mips.c 			cause, opc, run, vcpu);
vcpu             1311 arch/mips/kvm/mips.c 	trace_kvm_exit(vcpu, exccode);
vcpu             1319 arch/mips/kvm/mips.c 		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
vcpu             1331 arch/mips/kvm/mips.c 		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
vcpu             1333 arch/mips/kvm/mips.c 		++vcpu->stat.int_exits;
vcpu             1344 arch/mips/kvm/mips.c 		++vcpu->stat.cop_unusable_exits;
vcpu             1345 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
vcpu             1352 arch/mips/kvm/mips.c 		++vcpu->stat.tlbmod_exits;
vcpu             1353 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
vcpu             1358 arch/mips/kvm/mips.c 			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
vcpu             1361 arch/mips/kvm/mips.c 		++vcpu->stat.tlbmiss_st_exits;
vcpu             1362 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
vcpu             1369 arch/mips/kvm/mips.c 		++vcpu->stat.tlbmiss_ld_exits;
vcpu             1370 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
vcpu             1374 arch/mips/kvm/mips.c 		++vcpu->stat.addrerr_st_exits;
vcpu             1375 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
vcpu             1379 arch/mips/kvm/mips.c 		++vcpu->stat.addrerr_ld_exits;
vcpu             1380 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
vcpu             1384 arch/mips/kvm/mips.c 		++vcpu->stat.syscall_exits;
vcpu             1385 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_syscall(vcpu);
vcpu             1389 arch/mips/kvm/mips.c 		++vcpu->stat.resvd_inst_exits;
vcpu             1390 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
vcpu             1394 arch/mips/kvm/mips.c 		++vcpu->stat.break_inst_exits;
vcpu             1395 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_break(vcpu);
vcpu             1399 arch/mips/kvm/mips.c 		++vcpu->stat.trap_inst_exits;
vcpu             1400 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_trap(vcpu);
vcpu             1404 arch/mips/kvm/mips.c 		++vcpu->stat.msa_fpe_exits;
vcpu             1405 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
vcpu             1409 arch/mips/kvm/mips.c 		++vcpu->stat.fpe_exits;
vcpu             1410 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_fpe(vcpu);
vcpu             1414 arch/mips/kvm/mips.c 		++vcpu->stat.msa_disabled_exits;
vcpu             1415 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
vcpu             1420 arch/mips/kvm/mips.c 		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
vcpu             1427 arch/mips/kvm/mips.c 		kvm_get_badinstr(opc, vcpu, &inst);
vcpu             1430 arch/mips/kvm/mips.c 			kvm_read_c0_guest_status(vcpu->arch.cop0));
vcpu             1431 arch/mips/kvm/mips.c 		kvm_arch_vcpu_dump_regs(vcpu);
vcpu             1442 arch/mips/kvm/mips.c 		kvm_vz_acquire_htimer(vcpu);
vcpu             1445 arch/mips/kvm/mips.c 		kvm_mips_deliver_interrupts(vcpu, cause);
vcpu             1452 arch/mips/kvm/mips.c 			++vcpu->stat.signal_exits;
vcpu             1453 arch/mips/kvm/mips.c 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
vcpu             1458 arch/mips/kvm/mips.c 		trace_kvm_reenter(vcpu);
vcpu             1466 arch/mips/kvm/mips.c 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
vcpu             1468 arch/mips/kvm/mips.c 		kvm_mips_callbacks->vcpu_reenter(run, vcpu);
vcpu             1479 arch/mips/kvm/mips.c 		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
vcpu             1481 arch/mips/kvm/mips.c 			__kvm_restore_fcsr(&vcpu->arch);
vcpu             1483 arch/mips/kvm/mips.c 		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
vcpu             1485 arch/mips/kvm/mips.c 			__kvm_restore_msacsr(&vcpu->arch);
vcpu             1496 arch/mips/kvm/mips.c void kvm_own_fpu(struct kvm_vcpu *vcpu)
vcpu             1498 arch/mips/kvm/mips.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1516 arch/mips/kvm/mips.c 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
vcpu             1517 arch/mips/kvm/mips.c 		kvm_lose_fpu(vcpu);
vcpu             1531 arch/mips/kvm/mips.c 	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
vcpu             1532 arch/mips/kvm/mips.c 		__kvm_restore_fpu(&vcpu->arch);
vcpu             1533 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
vcpu             1534 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
vcpu             1536 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
vcpu             1544 arch/mips/kvm/mips.c void kvm_own_msa(struct kvm_vcpu *vcpu)
vcpu             1546 arch/mips/kvm/mips.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1555 arch/mips/kvm/mips.c 	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
vcpu             1563 arch/mips/kvm/mips.c 		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
vcpu             1565 arch/mips/kvm/mips.c 			kvm_lose_fpu(vcpu);
vcpu             1578 arch/mips/kvm/mips.c 	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
vcpu             1583 arch/mips/kvm/mips.c 		__kvm_restore_msa_upper(&vcpu->arch);
vcpu             1584 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
vcpu             1585 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
vcpu             1589 arch/mips/kvm/mips.c 		__kvm_restore_msa(&vcpu->arch);
vcpu             1590 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
vcpu             1591 arch/mips/kvm/mips.c 		if (kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu             1592 arch/mips/kvm/mips.c 			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
vcpu             1593 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
vcpu             1597 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
vcpu             1606 arch/mips/kvm/mips.c void kvm_drop_fpu(struct kvm_vcpu *vcpu)
vcpu             1609 arch/mips/kvm/mips.c 	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
vcpu             1611 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
vcpu             1612 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
vcpu             1614 arch/mips/kvm/mips.c 	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
vcpu             1616 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
vcpu             1617 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
vcpu             1623 arch/mips/kvm/mips.c void kvm_lose_fpu(struct kvm_vcpu *vcpu)
vcpu             1633 arch/mips/kvm/mips.c 	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
vcpu             1639 arch/mips/kvm/mips.c 		__kvm_save_msa(&vcpu->arch);
vcpu             1640 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
vcpu             1644 arch/mips/kvm/mips.c 		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
vcpu             1648 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
vcpu             1649 arch/mips/kvm/mips.c 	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
vcpu             1655 arch/mips/kvm/mips.c 		__kvm_save_fpu(&vcpu->arch);
vcpu             1656 arch/mips/kvm/mips.c 		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
vcpu             1657 arch/mips/kvm/mips.c 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
vcpu               60 arch/mips/kvm/mmu.c void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
vcpu               62 arch/mips/kvm/mmu.c 	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
vcpu              610 arch/mips/kvm/mmu.c static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
vcpu              614 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              686 arch/mips/kvm/mmu.c static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
vcpu              690 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              691 arch/mips/kvm/mmu.c 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
vcpu              702 arch/mips/kvm/mmu.c 	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
vcpu              785 arch/mips/kvm/mmu.c static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
vcpu              788 arch/mips/kvm/mmu.c 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
vcpu              798 arch/mips/kvm/mmu.c 	if (KVM_GUEST_KERNEL_MODE(vcpu))
vcpu              799 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_kernel_mm.pgd;
vcpu              801 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_user_mm.pgd;
vcpu              806 arch/mips/kvm/mmu.c void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu              814 arch/mips/kvm/mmu.c 	pgdp = vcpu->arch.guest_kernel_mm.pgd;
vcpu              822 arch/mips/kvm/mmu.c 		pgdp = vcpu->arch.guest_user_mm.pgd;
vcpu              988 arch/mips/kvm/mmu.c 				      struct kvm_vcpu *vcpu,
vcpu              993 arch/mips/kvm/mmu.c 	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
vcpu              998 arch/mips/kvm/mmu.c 	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
vcpu             1004 arch/mips/kvm/mmu.c 				    struct kvm_vcpu *vcpu,
vcpu             1020 arch/mips/kvm/mmu.c 	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
vcpu             1025 arch/mips/kvm/mmu.c 	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
vcpu             1036 arch/mips/kvm/mmu.c 	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
vcpu             1040 arch/mips/kvm/mmu.c int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
vcpu             1045 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1049 arch/mips/kvm/mmu.c 	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
vcpu             1062 arch/mips/kvm/mmu.c 	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
vcpu             1078 arch/mips/kvm/mmu.c 	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
vcpu             1089 arch/mips/kvm/mmu.c 	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
vcpu             1091 arch/mips/kvm/mmu.c 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
vcpu             1098 arch/mips/kvm/mmu.c 				       struct kvm_vcpu *vcpu)
vcpu             1103 arch/mips/kvm/mmu.c 	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
vcpu             1109 arch/mips/kvm/mmu.c 	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
vcpu             1114 arch/mips/kvm/mmu.c 	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
vcpu             1129 arch/mips/kvm/mmu.c static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
vcpu             1131 arch/mips/kvm/mmu.c 	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
vcpu             1132 arch/mips/kvm/mmu.c 		hrtimer_restart(&vcpu->arch.comparecount_timer);
vcpu             1136 arch/mips/kvm/mmu.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             1140 arch/mips/kvm/mmu.c 	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
vcpu             1144 arch/mips/kvm/mmu.c 	vcpu->cpu = cpu;
vcpu             1145 arch/mips/kvm/mmu.c 	if (vcpu->arch.last_sched_cpu != cpu) {
vcpu             1147 arch/mips/kvm/mmu.c 			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
vcpu             1153 arch/mips/kvm/mmu.c 		kvm_mips_migrate_count(vcpu);
vcpu             1157 arch/mips/kvm/mmu.c 	kvm_mips_callbacks->vcpu_load(vcpu, cpu);
vcpu             1163 arch/mips/kvm/mmu.c void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             1171 arch/mips/kvm/mmu.c 	vcpu->arch.last_sched_cpu = cpu;
vcpu             1172 arch/mips/kvm/mmu.c 	vcpu->cpu = -1;
vcpu             1175 arch/mips/kvm/mmu.c 	kvm_mips_callbacks->vcpu_put(vcpu, cpu);
vcpu             1196 arch/mips/kvm/mmu.c enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
vcpu             1200 arch/mips/kvm/mmu.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1205 arch/mips/kvm/mmu.c 		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
vcpu             1210 arch/mips/kvm/mmu.c 		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
vcpu             1214 arch/mips/kvm/mmu.c 		tlb = &vcpu->arch.guest_tlb[index];
vcpu             1222 arch/mips/kvm/mmu.c 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
vcpu             1231 arch/mips/kvm/mmu.c int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
vcpu             1240 arch/mips/kvm/mmu.c 	kvm_trap_emul_gva_lockless_begin(vcpu);
vcpu             1242 arch/mips/kvm/mmu.c 	kvm_trap_emul_gva_lockless_end(vcpu);
vcpu             1249 arch/mips/kvm/mmu.c 		err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
vcpu               49 arch/mips/kvm/stats.c void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
vcpu               54 arch/mips/kvm/stats.c 	kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
vcpu               57 arch/mips/kvm/stats.c 			if (vcpu->arch.cop0->stat[i][j])
vcpu               59 arch/mips/kvm/stats.c 					 vcpu->arch.cop0->stat[i][j]);
vcpu               44 arch/mips/kvm/tlb.c static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
vcpu               46 arch/mips/kvm/tlb.c 	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
vcpu               55 arch/mips/kvm/tlb.c static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
vcpu               57 arch/mips/kvm/tlb.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu               63 arch/mips/kvm/tlb.c static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
vcpu               65 arch/mips/kvm/tlb.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu               88 arch/mips/kvm/tlb.c void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
vcpu               90 arch/mips/kvm/tlb.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu               98 arch/mips/kvm/tlb.c 		tlb = vcpu->arch.guest_tlb[i];
vcpu              118 arch/mips/kvm/tlb.c int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
vcpu              122 arch/mips/kvm/tlb.c 	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
vcpu              166 arch/mips/kvm/tlb.c int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
vcpu              182 arch/mips/kvm/tlb.c 						  kvm_mips_get_user_asid(vcpu));
vcpu              185 arch/mips/kvm/tlb.c 						kvm_mips_get_kernel_asid(vcpu));
vcpu              202 arch/mips/kvm/tlb.c 				    kvm_mips_get_user_asid(vcpu), idx_user);
vcpu              206 arch/mips/kvm/tlb.c 				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);
vcpu              251 arch/mips/kvm/tlb.c int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
vcpu              265 arch/mips/kvm/tlb.c 				     kvm_mips_get_root_asid(vcpu));
vcpu              284 arch/mips/kvm/tlb.c 				    kvm_mips_get_root_asid(vcpu), idx);
vcpu              304 arch/mips/kvm/tlb.c int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu               31 arch/mips/kvm/trace.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu               32 arch/mips/kvm/trace.h 	TP_ARGS(vcpu),
vcpu               38 arch/mips/kvm/trace.h 		__entry->pc = vcpu->arch.pc;
vcpu               46 arch/mips/kvm/trace.h 	     TP_PROTO(struct kvm_vcpu *vcpu),
vcpu               47 arch/mips/kvm/trace.h 	     TP_ARGS(vcpu));
vcpu               50 arch/mips/kvm/trace.h 	     TP_PROTO(struct kvm_vcpu *vcpu),
vcpu               51 arch/mips/kvm/trace.h 	     TP_ARGS(vcpu));
vcpu               54 arch/mips/kvm/trace.h 	     TP_PROTO(struct kvm_vcpu *vcpu),
vcpu               55 arch/mips/kvm/trace.h 	     TP_ARGS(vcpu));
vcpu              116 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
vcpu              117 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu, reason),
vcpu              124 arch/mips/kvm/trace.h 			__entry->pc = vcpu->arch.pc;
vcpu              203 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
vcpu              205 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu, op, reg, val),
vcpu              253 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
vcpu              255 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu, op, state),
vcpu              263 arch/mips/kvm/trace.h 			__entry->pc = vcpu->arch.pc;
vcpu              277 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
vcpu              279 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu, old_asid, new_asid),
vcpu              287 arch/mips/kvm/trace.h 			__entry->pc = vcpu->arch.pc;
vcpu              299 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
vcpu              300 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu, guestid),
vcpu              314 arch/mips/kvm/trace.h 	    TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              315 arch/mips/kvm/trace.h 	    TP_ARGS(vcpu),
vcpu              325 arch/mips/kvm/trace.h 			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
vcpu              326 arch/mips/kvm/trace.h 			__entry->pc = vcpu->arch.pc;
vcpu              327 arch/mips/kvm/trace.h 			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
vcpu              328 arch/mips/kvm/trace.h 			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
vcpu              329 arch/mips/kvm/trace.h 			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
vcpu               44 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
vcpu               46 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu               47 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu               49 arch/mips/kvm/trap_emul.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu               57 arch/mips/kvm/trap_emul.c 	kvm_get_badinstr(opc, vcpu, &inst);
vcpu               61 arch/mips/kvm/trap_emul.c 		kvm_read_c0_guest_status(vcpu->arch.cop0));
vcpu               62 arch/mips/kvm/trap_emul.c 	kvm_arch_vcpu_dump_regs(vcpu);
vcpu               63 arch/mips/kvm/trap_emul.c 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu               67 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
vcpu               69 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu               70 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu               71 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu               72 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu               78 arch/mips/kvm/trap_emul.c 		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
vcpu               84 arch/mips/kvm/trap_emul.c 			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
vcpu               87 arch/mips/kvm/trap_emul.c 			kvm_own_fpu(vcpu);
vcpu               91 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
vcpu              110 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_handle_hypcall(vcpu);
vcpu              120 arch/mips/kvm/trap_emul.c 			     struct kvm_vcpu *vcpu)
vcpu              127 arch/mips/kvm/trap_emul.c 	if (kvm_is_ifetch_fault(&vcpu->arch)) {
vcpu              135 arch/mips/kvm/trap_emul.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu              142 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
vcpu              153 arch/mips/kvm/trap_emul.c 			      struct kvm_vcpu *vcpu)
vcpu              162 arch/mips/kvm/trap_emul.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu              169 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
vcpu              180 arch/mips/kvm/trap_emul.c 			       struct kvm_vcpu *vcpu, bool store)
vcpu              183 arch/mips/kvm/trap_emul.c 		return kvm_mips_bad_store(cause, opc, run, vcpu);
vcpu              185 arch/mips/kvm/trap_emul.c 		return kvm_mips_bad_load(cause, opc, run, vcpu);
vcpu              188 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
vcpu              190 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              191 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              192 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              193 arch/mips/kvm/trap_emul.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu              194 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              208 arch/mips/kvm/trap_emul.c 		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
vcpu              218 arch/mips/kvm/trap_emul.c 		tlb = vcpu->arch.guest_tlb + index;
vcpu              229 arch/mips/kvm/trap_emul.c 			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
vcpu              233 arch/mips/kvm/trap_emul.c 		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
vcpu              236 arch/mips/kvm/trap_emul.c 			return kvm_mips_bad_store(cause, opc, run, vcpu);
vcpu              239 arch/mips/kvm/trap_emul.c 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
vcpu              241 arch/mips/kvm/trap_emul.c 			return kvm_mips_bad_store(cause, opc, run, vcpu);
vcpu              245 arch/mips/kvm/trap_emul.c 		return kvm_mips_bad_store(cause, opc, run, vcpu);
vcpu              249 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
vcpu              251 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              252 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              253 arch/mips/kvm/trap_emul.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu              254 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              259 arch/mips/kvm/trap_emul.c 	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
vcpu              260 arch/mips/kvm/trap_emul.c 		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
vcpu              279 arch/mips/kvm/trap_emul.c 		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
vcpu              291 arch/mips/kvm/trap_emul.c 		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
vcpu              292 arch/mips/kvm/trap_emul.c 			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
vcpu              293 arch/mips/kvm/trap_emul.c 	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
vcpu              299 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
vcpu              304 arch/mips/kvm/trap_emul.c 		kvm_arch_vcpu_dump_regs(vcpu);
vcpu              311 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
vcpu              313 arch/mips/kvm/trap_emul.c 	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
vcpu              316 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
vcpu              318 arch/mips/kvm/trap_emul.c 	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
vcpu              321 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
vcpu              323 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              324 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              325 arch/mips/kvm/trap_emul.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu              326 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              329 arch/mips/kvm/trap_emul.c 	if (KVM_GUEST_KERNEL_MODE(vcpu)
vcpu              331 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
vcpu              341 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
vcpu              343 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              344 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              345 arch/mips/kvm/trap_emul.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu              346 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              350 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
vcpu              360 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
vcpu              362 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              363 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              364 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              368 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
vcpu              378 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
vcpu              380 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              381 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              382 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              386 arch/mips/kvm/trap_emul.c 	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
vcpu              396 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
vcpu              398 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              399 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              400 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              404 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
vcpu              414 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
vcpu              416 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              417 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
vcpu              418 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              422 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
vcpu              432 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
vcpu              434 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              435 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
vcpu              436 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              440 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
vcpu              450 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
vcpu              452 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              453 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
vcpu              454 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              458 arch/mips/kvm/trap_emul.c 	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
vcpu              474 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
vcpu              476 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              477 arch/mips/kvm/trap_emul.c 	struct kvm_run *run = vcpu->run;
vcpu              478 arch/mips/kvm/trap_emul.c 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
vcpu              479 arch/mips/kvm/trap_emul.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              483 arch/mips/kvm/trap_emul.c 	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
vcpu              489 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
vcpu              492 arch/mips/kvm/trap_emul.c 		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
vcpu              495 arch/mips/kvm/trap_emul.c 		kvm_own_msa(vcpu);
vcpu              540 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              542 arch/mips/kvm/trap_emul.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu              543 arch/mips/kvm/trap_emul.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu              605 arch/mips/kvm/trap_emul.c static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              607 arch/mips/kvm/trap_emul.c 	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
vcpu              608 arch/mips/kvm/trap_emul.c 	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
vcpu              611 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu              613 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              615 arch/mips/kvm/trap_emul.c 	int vcpu_id = vcpu->vcpu_id;
vcpu              618 arch/mips/kvm/trap_emul.c 	kvm_mips_init_count(vcpu, 100*1000*1000);
vcpu              693 arch/mips/kvm/trap_emul.c 	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);
vcpu              749 arch/mips/kvm/trap_emul.c static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
vcpu              754 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
vcpu              765 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
vcpu              769 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              845 arch/mips/kvm/trap_emul.c 		*v = kvm_mips_read_count(vcpu);
vcpu              848 arch/mips/kvm/trap_emul.c 		*v = vcpu->arch.count_ctl;
vcpu              851 arch/mips/kvm/trap_emul.c 		*v = ktime_to_ns(vcpu->arch.count_resume);
vcpu              854 arch/mips/kvm/trap_emul.c 		*v = vcpu->arch.count_hz;
vcpu              883 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
vcpu              887 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              943 arch/mips/kvm/trap_emul.c 		kvm_mips_write_count(vcpu, v);
vcpu              946 arch/mips/kvm/trap_emul.c 		kvm_mips_write_compare(vcpu, v, false);
vcpu              957 arch/mips/kvm/trap_emul.c 				kvm_mips_count_disable_cause(vcpu);
vcpu              964 arch/mips/kvm/trap_emul.c 				kvm_mips_count_enable_cause(vcpu);
vcpu              975 arch/mips/kvm/trap_emul.c 		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
vcpu              986 arch/mips/kvm/trap_emul.c 		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
vcpu              994 arch/mips/kvm/trap_emul.c 		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
vcpu             1002 arch/mips/kvm/trap_emul.c 		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
vcpu             1012 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_set_count_ctl(vcpu, v);
vcpu             1015 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_set_count_resume(vcpu, v);
vcpu             1018 arch/mips/kvm/trap_emul.c 		ret = kvm_mips_set_count_hz(vcpu, v);
vcpu             1047 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             1049 arch/mips/kvm/trap_emul.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu             1050 arch/mips/kvm/trap_emul.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu             1058 arch/mips/kvm/trap_emul.c 		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
vcpu             1067 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
vcpu             1069 arch/mips/kvm/trap_emul.c 	kvm_lose_fpu(vcpu);
vcpu             1081 arch/mips/kvm/trap_emul.c static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
vcpu             1084 arch/mips/kvm/trap_emul.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu             1085 arch/mips/kvm/trap_emul.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu             1089 arch/mips/kvm/trap_emul.c 	if (likely(!kvm_request_pending(vcpu)))
vcpu             1092 arch/mips/kvm/trap_emul.c 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
vcpu             1107 arch/mips/kvm/trap_emul.c 			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
vcpu             1127 arch/mips/kvm/trap_emul.c void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
vcpu             1145 arch/mips/kvm/trap_emul.c 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
vcpu             1156 arch/mips/kvm/trap_emul.c 	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
vcpu             1166 arch/mips/kvm/trap_emul.c void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
vcpu             1173 arch/mips/kvm/trap_emul.c 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
vcpu             1183 arch/mips/kvm/trap_emul.c 				       struct kvm_vcpu *vcpu)
vcpu             1185 arch/mips/kvm/trap_emul.c 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
vcpu             1186 arch/mips/kvm/trap_emul.c 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
vcpu             1188 arch/mips/kvm/trap_emul.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1197 arch/mips/kvm/trap_emul.c 	kvm_trap_emul_check_requests(vcpu, cpu, false);
vcpu             1199 arch/mips/kvm/trap_emul.c 	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
vcpu             1211 arch/mips/kvm/trap_emul.c 		if (gasid != vcpu->arch.last_user_gasid) {
vcpu             1215 arch/mips/kvm/trap_emul.c 			vcpu->arch.last_user_gasid = gasid;
vcpu             1226 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             1232 arch/mips/kvm/trap_emul.c 	kvm_mips_deliver_interrupts(vcpu,
vcpu             1233 arch/mips/kvm/trap_emul.c 				    kvm_read_c0_guest_cause(vcpu->arch.cop0));
vcpu             1235 arch/mips/kvm/trap_emul.c 	kvm_trap_emul_vcpu_reenter(run, vcpu);
vcpu             1253 arch/mips/kvm/trap_emul.c 	r = vcpu->arch.vcpu_run(run, vcpu);
vcpu               81 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu               86 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu               91 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu               96 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu              101 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu              107 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
vcpu              112 arch/mips/kvm/vz.c 	if (kvm_mips_guest_has_msa(&vcpu->arch))
vcpu              119 arch/mips/kvm/vz.c 	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
vcpu              140 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              142 arch/mips/kvm/vz.c 	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
vcpu              145 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              147 arch/mips/kvm/vz.c 	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
vcpu              150 arch/mips/kvm/vz.c 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
vcpu              156 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              158 arch/mips/kvm/vz.c 	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
vcpu              161 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              163 arch/mips/kvm/vz.c 	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
vcpu              167 arch/mips/kvm/vz.c 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
vcpu              173 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              175 arch/mips/kvm/vz.c 	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
vcpu              178 arch/mips/kvm/vz.c static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
vcpu              180 arch/mips/kvm/vz.c 	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
vcpu              189 arch/mips/kvm/vz.c static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
vcpu              191 arch/mips/kvm/vz.c 	set_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              192 arch/mips/kvm/vz.c 	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
vcpu              195 arch/mips/kvm/vz.c static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
vcpu              197 arch/mips/kvm/vz.c 	clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              198 arch/mips/kvm/vz.c 	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
vcpu              201 arch/mips/kvm/vz.c static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
vcpu              207 arch/mips/kvm/vz.c 	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu              210 arch/mips/kvm/vz.c static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
vcpu              216 arch/mips/kvm/vz.c 	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu              219 arch/mips/kvm/vz.c static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu              230 arch/mips/kvm/vz.c 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
vcpu              234 arch/mips/kvm/vz.c 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
vcpu              238 arch/mips/kvm/vz.c 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
vcpu              247 arch/mips/kvm/vz.c static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
vcpu              258 arch/mips/kvm/vz.c 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
vcpu              262 arch/mips/kvm/vz.c 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
vcpu              266 arch/mips/kvm/vz.c 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
vcpu              282 arch/mips/kvm/vz.c static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              306 arch/mips/kvm/vz.c 	clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              310 arch/mips/kvm/vz.c static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
vcpu              348 arch/mips/kvm/vz.c 	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
vcpu              364 arch/mips/kvm/vz.c static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
vcpu              366 arch/mips/kvm/vz.c 	if (kvm_mips_count_disabled(vcpu))
vcpu              370 arch/mips/kvm/vz.c 	if (mips_hpt_frequency != vcpu->arch.count_hz)
vcpu              389 arch/mips/kvm/vz.c static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
vcpu              411 arch/mips/kvm/vz.c static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
vcpu              423 arch/mips/kvm/vz.c 	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
vcpu              439 arch/mips/kvm/vz.c 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu              448 arch/mips/kvm/vz.c static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
vcpu              450 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              457 arch/mips/kvm/vz.c 	_kvm_vz_restore_stimer(vcpu, compare, cause);
vcpu              469 arch/mips/kvm/vz.c void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
vcpu              474 arch/mips/kvm/vz.c 	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
vcpu              478 arch/mips/kvm/vz.c 		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
vcpu              493 arch/mips/kvm/vz.c static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
vcpu              527 arch/mips/kvm/vz.c 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
vcpu              533 arch/mips/kvm/vz.c 	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
vcpu              543 arch/mips/kvm/vz.c static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
vcpu              545 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              554 arch/mips/kvm/vz.c 		_kvm_vz_save_htimer(vcpu, &compare, &cause);
vcpu              572 arch/mips/kvm/vz.c void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
vcpu              583 arch/mips/kvm/vz.c 		_kvm_vz_save_htimer(vcpu, &compare, &cause);
vcpu              586 arch/mips/kvm/vz.c 		_kvm_vz_restore_stimer(vcpu, compare, cause);
vcpu              642 arch/mips/kvm/vz.c static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
vcpu              690 arch/mips/kvm/vz.c 			opc = (u32 *)vcpu->arch.pc;
vcpu              691 arch/mips/kvm/vz.c 			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
vcpu              693 arch/mips/kvm/vz.c 			err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu              714 arch/mips/kvm/vz.c static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu              760 arch/mips/kvm/vz.c 			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
vcpu              785 arch/mips/kvm/vz.c 				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
vcpu              803 arch/mips/kvm/vz.c 	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
vcpu              821 arch/mips/kvm/vz.c static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
vcpu              824 arch/mips/kvm/vz.c 	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
vcpu              839 arch/mips/kvm/vz.c 	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
vcpu              842 arch/mips/kvm/vz.c static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
vcpu              844 arch/mips/kvm/vz.c 	u32 *opc = (u32 *) vcpu->arch.pc;
vcpu              845 arch/mips/kvm/vz.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu              847 arch/mips/kvm/vz.c 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu              855 arch/mips/kvm/vz.c 	kvm_get_badinstr(opc, vcpu, &inst);
vcpu              860 arch/mips/kvm/vz.c 	kvm_arch_vcpu_dump_regs(vcpu);
vcpu              861 arch/mips/kvm/vz.c 	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu              889 arch/mips/kvm/vz.c static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
vcpu              891 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              895 arch/mips/kvm/vz.c 		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
vcpu              896 arch/mips/kvm/vz.c 	else if (val < ARRAY_SIZE(vcpu->arch.maar))
vcpu              903 arch/mips/kvm/vz.c 					      struct kvm_vcpu *vcpu)
vcpu              905 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu              915 arch/mips/kvm/vz.c 	curr_pc = vcpu->arch.pc;
vcpu              916 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
vcpu              923 arch/mips/kvm/vz.c 			er = kvm_mips_emul_wait(vcpu);
vcpu              941 arch/mips/kvm/vz.c 				val = kvm_mips_read_count(vcpu);
vcpu              958 arch/mips/kvm/vz.c 						ARRAY_SIZE(vcpu->arch.maar));
vcpu              959 arch/mips/kvm/vz.c 				val = vcpu->arch.maar[
vcpu              986 arch/mips/kvm/vz.c 				vcpu->arch.gprs[rt] = val;
vcpu              989 arch/mips/kvm/vz.c 			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
vcpu              999 arch/mips/kvm/vz.c 			val = vcpu->arch.gprs[rt];
vcpu             1000 arch/mips/kvm/vz.c 			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
vcpu             1006 arch/mips/kvm/vz.c 				kvm_vz_lose_htimer(vcpu);
vcpu             1007 arch/mips/kvm/vz.c 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
vcpu             1010 arch/mips/kvm/vz.c 				kvm_mips_write_compare(vcpu,
vcpu             1011 arch/mips/kvm/vz.c 						       vcpu->arch.gprs[rt],
vcpu             1031 arch/mips/kvm/vz.c 						ARRAY_SIZE(vcpu->arch.maar));
vcpu             1032 arch/mips/kvm/vz.c 				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
vcpu             1038 arch/mips/kvm/vz.c 				kvm_write_maari(vcpu, val);
vcpu             1057 arch/mips/kvm/vz.c 		vcpu->arch.pc = curr_pc;
vcpu             1066 arch/mips/kvm/vz.c 					       struct kvm_vcpu *vcpu)
vcpu             1071 arch/mips/kvm/vz.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             1078 arch/mips/kvm/vz.c 	curr_pc = vcpu->arch.pc;
vcpu             1079 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
vcpu             1124 arch/mips/kvm/vz.c 		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
vcpu             1127 arch/mips/kvm/vz.c 	vcpu->arch.pc = curr_pc;
vcpu             1133 arch/mips/kvm/vz.c 						     struct kvm_vcpu *vcpu)
vcpu             1136 arch/mips/kvm/vz.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             1137 arch/mips/kvm/vz.c 	struct kvm_run *run = vcpu->run;
vcpu             1147 arch/mips/kvm/vz.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1153 arch/mips/kvm/vz.c 		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
vcpu             1157 arch/mips/kvm/vz.c 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
vcpu             1158 arch/mips/kvm/vz.c 		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
vcpu             1165 arch/mips/kvm/vz.c 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
vcpu             1166 arch/mips/kvm/vz.c 			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
vcpu             1180 arch/mips/kvm/vz.c 					(long)(int)kvm_mips_read_count(vcpu);
vcpu             1183 arch/mips/kvm/vz.c 				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
vcpu             1188 arch/mips/kvm/vz.c 			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
vcpu             1191 arch/mips/kvm/vz.c 			er = update_pc(vcpu, cause);
vcpu             1202 arch/mips/kvm/vz.c 		kvm_arch_vcpu_dump_regs(vcpu);
vcpu             1211 arch/mips/kvm/vz.c 						     struct kvm_vcpu *vcpu)
vcpu             1214 arch/mips/kvm/vz.c 	struct kvm_vcpu_arch *arch = &vcpu->arch;
vcpu             1223 arch/mips/kvm/vz.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1237 arch/mips/kvm/vz.c 		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
vcpu             1242 arch/mips/kvm/vz.c 			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
vcpu             1261 arch/mips/kvm/vz.c 				kvm_drop_fpu(vcpu);
vcpu             1272 arch/mips/kvm/vz.c 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
vcpu             1273 arch/mips/kvm/vz.c 				kvm_lose_fpu(vcpu);
vcpu             1283 arch/mips/kvm/vz.c 					kvm_vz_lose_htimer(vcpu);
vcpu             1284 arch/mips/kvm/vz.c 					kvm_mips_count_disable_cause(vcpu);
vcpu             1286 arch/mips/kvm/vz.c 					kvm_mips_count_enable_cause(vcpu);
vcpu             1311 arch/mips/kvm/vz.c 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
vcpu             1317 arch/mips/kvm/vz.c 				(change & kvm_vz_config5_guest_wrmask(vcpu));
vcpu             1326 arch/mips/kvm/vz.c 			er = update_pc(vcpu, cause);
vcpu             1337 arch/mips/kvm/vz.c 						     struct kvm_vcpu *vcpu)
vcpu             1343 arch/mips/kvm/vz.c 	trace_kvm_guest_mode_change(vcpu);
vcpu             1349 arch/mips/kvm/vz.c 						   struct kvm_vcpu *vcpu)
vcpu             1358 arch/mips/kvm/vz.c 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1366 arch/mips/kvm/vz.c 	curr_pc = vcpu->arch.pc;
vcpu             1367 arch/mips/kvm/vz.c 	er = update_pc(vcpu, cause);
vcpu             1371 arch/mips/kvm/vz.c 	er = kvm_mips_emul_hypcall(vcpu, inst);
vcpu             1373 arch/mips/kvm/vz.c 		vcpu->arch.pc = curr_pc;
vcpu             1381 arch/mips/kvm/vz.c 							struct kvm_vcpu *vcpu)
vcpu             1390 arch/mips/kvm/vz.c 	kvm_get_badinstr(opc, vcpu, &inst);
vcpu             1398 arch/mips/kvm/vz.c static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
vcpu             1400 arch/mips/kvm/vz.c 	u32 *opc = (u32 *) vcpu->arch.pc;
vcpu             1401 arch/mips/kvm/vz.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu             1403 arch/mips/kvm/vz.c 	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
vcpu             1407 arch/mips/kvm/vz.c 	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
vcpu             1410 arch/mips/kvm/vz.c 		++vcpu->stat.vz_gpsi_exits;
vcpu             1411 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
vcpu             1414 arch/mips/kvm/vz.c 		++vcpu->stat.vz_gsfc_exits;
vcpu             1415 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
vcpu             1418 arch/mips/kvm/vz.c 		++vcpu->stat.vz_hc_exits;
vcpu             1419 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
vcpu             1422 arch/mips/kvm/vz.c 		++vcpu->stat.vz_grr_exits;
vcpu             1424 arch/mips/kvm/vz.c 						       vcpu);
vcpu             1427 arch/mips/kvm/vz.c 		++vcpu->stat.vz_gva_exits;
vcpu             1429 arch/mips/kvm/vz.c 						       vcpu);
vcpu             1432 arch/mips/kvm/vz.c 		++vcpu->stat.vz_ghfc_exits;
vcpu             1433 arch/mips/kvm/vz.c 		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
vcpu             1436 arch/mips/kvm/vz.c 		++vcpu->stat.vz_gpa_exits;
vcpu             1438 arch/mips/kvm/vz.c 						       vcpu);
vcpu             1441 arch/mips/kvm/vz.c 		++vcpu->stat.vz_resvd_exits;
vcpu             1443 arch/mips/kvm/vz.c 						       vcpu);
vcpu             1451 arch/mips/kvm/vz.c 		ret = kvm_mips_handle_hypcall(vcpu);
vcpu             1453 arch/mips/kvm/vz.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             1466 arch/mips/kvm/vz.c static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
vcpu             1468 arch/mips/kvm/vz.c 	struct kvm_run *run = vcpu->run;
vcpu             1469 arch/mips/kvm/vz.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu             1479 arch/mips/kvm/vz.c 		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
vcpu             1480 arch/mips/kvm/vz.c 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
vcpu             1485 arch/mips/kvm/vz.c 		kvm_own_fpu(vcpu);
vcpu             1513 arch/mips/kvm/vz.c static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
vcpu             1515 arch/mips/kvm/vz.c 	struct kvm_run *run = vcpu->run;
vcpu             1523 arch/mips/kvm/vz.c 	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
vcpu             1526 arch/mips/kvm/vz.c 	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
vcpu             1531 arch/mips/kvm/vz.c 	kvm_own_msa(vcpu);
vcpu             1536 arch/mips/kvm/vz.c static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
vcpu             1538 arch/mips/kvm/vz.c 	struct kvm_run *run = vcpu->run;
vcpu             1539 arch/mips/kvm/vz.c 	u32 *opc = (u32 *) vcpu->arch.pc;
vcpu             1540 arch/mips/kvm/vz.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu             1541 arch/mips/kvm/vz.c 	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu             1546 arch/mips/kvm/vz.c 	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
vcpu             1548 arch/mips/kvm/vz.c 		if (kvm_is_ifetch_fault(&vcpu->arch)) {
vcpu             1556 arch/mips/kvm/vz.c 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1563 arch/mips/kvm/vz.c 		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
vcpu             1583 arch/mips/kvm/vz.c static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
vcpu             1585 arch/mips/kvm/vz.c 	struct kvm_run *run = vcpu->run;
vcpu             1586 arch/mips/kvm/vz.c 	u32 *opc = (u32 *) vcpu->arch.pc;
vcpu             1587 arch/mips/kvm/vz.c 	u32 cause = vcpu->arch.host_cp0_cause;
vcpu             1588 arch/mips/kvm/vz.c 	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
vcpu             1595 arch/mips/kvm/vz.c 	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
vcpu             1597 arch/mips/kvm/vz.c 	vcpu->arch.host_cp0_badvaddr = badvaddr;
vcpu             1599 arch/mips/kvm/vz.c 	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
vcpu             1603 arch/mips/kvm/vz.c 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
vcpu             1610 arch/mips/kvm/vz.c 		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
vcpu             1694 arch/mips/kvm/vz.c static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
vcpu             1712 arch/mips/kvm/vz.c 		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
vcpu             1718 arch/mips/kvm/vz.c static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
vcpu             1765 arch/mips/kvm/vz.c 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
vcpu             1822 arch/mips/kvm/vz.c static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
vcpu             1826 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             1920 arch/mips/kvm/vz.c 		*v = kvm_mips_read_count(vcpu);
vcpu             1986 arch/mips/kvm/vz.c 		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
vcpu             1988 arch/mips/kvm/vz.c 		*v = vcpu->arch.maar[idx];
vcpu             1993 arch/mips/kvm/vz.c 		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
vcpu             2029 arch/mips/kvm/vz.c 		*v = vcpu->arch.count_ctl;
vcpu             2032 arch/mips/kvm/vz.c 		*v = ktime_to_ns(vcpu->arch.count_resume);
vcpu             2035 arch/mips/kvm/vz.c 		*v = vcpu->arch.count_hz;
vcpu             2043 arch/mips/kvm/vz.c static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
vcpu             2047 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2143 arch/mips/kvm/vz.c 		kvm_mips_write_count(vcpu, v);
vcpu             2149 arch/mips/kvm/vz.c 		kvm_mips_write_compare(vcpu, v, false);
vcpu             2166 arch/mips/kvm/vz.c 				kvm_mips_count_disable_cause(vcpu);
vcpu             2171 arch/mips/kvm/vz.c 				kvm_mips_count_enable_cause(vcpu);
vcpu             2195 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
vcpu             2205 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
vcpu             2215 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
vcpu             2225 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
vcpu             2235 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
vcpu             2245 arch/mips/kvm/vz.c 		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
vcpu             2255 arch/mips/kvm/vz.c 		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
vcpu             2257 arch/mips/kvm/vz.c 		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
vcpu             2262 arch/mips/kvm/vz.c 		kvm_write_maari(vcpu, v);
vcpu             2298 arch/mips/kvm/vz.c 		ret = kvm_mips_set_count_ctl(vcpu, v);
vcpu             2301 arch/mips/kvm/vz.c 		ret = kvm_mips_set_count_resume(vcpu, v);
vcpu             2304 arch/mips/kvm/vz.c 		ret = kvm_mips_set_count_hz(vcpu, v);
vcpu             2313 arch/mips/kvm/vz.c static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
vcpu             2335 arch/mips/kvm/vz.c static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
vcpu             2340 arch/mips/kvm/vz.c 	if (!kvm_request_pending(vcpu))
vcpu             2343 arch/mips/kvm/vz.c 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
vcpu             2347 arch/mips/kvm/vz.c 				vcpu->arch.vzguestid[i] = 0;
vcpu             2362 arch/mips/kvm/vz.c static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
vcpu             2370 arch/mips/kvm/vz.c 	if (wired > vcpu->arch.wired_tlb_limit) {
vcpu             2371 arch/mips/kvm/vz.c 		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
vcpu             2372 arch/mips/kvm/vz.c 				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
vcpu             2375 arch/mips/kvm/vz.c 			wired = vcpu->arch.wired_tlb_limit;
vcpu             2377 arch/mips/kvm/vz.c 			vcpu->arch.wired_tlb = tlbs;
vcpu             2378 arch/mips/kvm/vz.c 			vcpu->arch.wired_tlb_limit = wired;
vcpu             2384 arch/mips/kvm/vz.c 		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
vcpu             2386 arch/mips/kvm/vz.c 	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
vcpu             2387 arch/mips/kvm/vz.c 		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
vcpu             2388 arch/mips/kvm/vz.c 		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
vcpu             2389 arch/mips/kvm/vz.c 		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
vcpu             2390 arch/mips/kvm/vz.c 		vcpu->arch.wired_tlb[i].tlb_mask = 0;
vcpu             2392 arch/mips/kvm/vz.c 	vcpu->arch.wired_tlb_used = wired;
vcpu             2395 arch/mips/kvm/vz.c static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
vcpu             2398 arch/mips/kvm/vz.c 	if (vcpu->arch.wired_tlb)
vcpu             2399 arch/mips/kvm/vz.c 		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
vcpu             2400 arch/mips/kvm/vz.c 				     vcpu->arch.wired_tlb_used);
vcpu             2403 arch/mips/kvm/vz.c static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
vcpu             2405 arch/mips/kvm/vz.c 	struct kvm *kvm = vcpu->kvm;
vcpu             2413 arch/mips/kvm/vz.c 	migrated = (vcpu->arch.last_exec_cpu != cpu);
vcpu             2414 arch/mips/kvm/vz.c 	vcpu->arch.last_exec_cpu = cpu;
vcpu             2431 arch/mips/kvm/vz.c 		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
vcpu             2433 arch/mips/kvm/vz.c 			kvm_vz_get_new_guestid(cpu, vcpu);
vcpu             2434 arch/mips/kvm/vz.c 			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
vcpu             2435 arch/mips/kvm/vz.c 			trace_kvm_guestid_change(vcpu,
vcpu             2436 arch/mips/kvm/vz.c 						 vcpu->arch.vzguestid[cpu]);
vcpu             2440 arch/mips/kvm/vz.c 		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
vcpu             2449 arch/mips/kvm/vz.c 		if (migrated || last_exec_vcpu[cpu] != vcpu)
vcpu             2451 arch/mips/kvm/vz.c 		last_exec_vcpu[cpu] = vcpu;
vcpu             2464 arch/mips/kvm/vz.c static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2466 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2473 arch/mips/kvm/vz.c 	migrated = (vcpu->arch.last_sched_cpu != cpu);
vcpu             2479 arch/mips/kvm/vz.c 	all = migrated || (last_vcpu[cpu] != vcpu);
vcpu             2480 arch/mips/kvm/vz.c 	last_vcpu[cpu] = vcpu;
vcpu             2489 arch/mips/kvm/vz.c 		kvm_vz_vcpu_load_tlb(vcpu, cpu);
vcpu             2490 arch/mips/kvm/vz.c 		kvm_vz_vcpu_load_wired(vcpu);
vcpu             2497 arch/mips/kvm/vz.c 	kvm_vz_restore_timer(vcpu);
vcpu             2606 arch/mips/kvm/vz.c static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
vcpu             2608 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2611 arch/mips/kvm/vz.c 		kvm_vz_vcpu_save_wired(vcpu);
vcpu             2613 arch/mips/kvm/vz.c 	kvm_lose_fpu(vcpu);
vcpu             2696 arch/mips/kvm/vz.c 	kvm_vz_save_timer(vcpu);
vcpu             2938 arch/mips/kvm/vz.c static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             2943 arch/mips/kvm/vz.c 		vcpu->arch.vzguestid[i] = 0;
vcpu             2948 arch/mips/kvm/vz.c static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu             2958 arch/mips/kvm/vz.c 		if (last_vcpu[cpu] == vcpu)
vcpu             2960 arch/mips/kvm/vz.c 		if (last_exec_vcpu[cpu] == vcpu)
vcpu             2965 arch/mips/kvm/vz.c static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu             2967 arch/mips/kvm/vz.c 	struct mips_coproc *cop0 = vcpu->arch.cop0;
vcpu             2976 arch/mips/kvm/vz.c 	kvm_mips_init_count(vcpu, count_hz);
vcpu             2999 arch/mips/kvm/vz.c 	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
vcpu             3101 arch/mips/kvm/vz.c 	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
vcpu             3132 arch/mips/kvm/vz.c static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             3137 arch/mips/kvm/vz.c 	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
vcpu             3140 arch/mips/kvm/vz.c 		kvm_vz_vcpu_save_wired(vcpu);
vcpu             3142 arch/mips/kvm/vz.c 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
vcpu             3145 arch/mips/kvm/vz.c 		kvm_vz_vcpu_load_wired(vcpu);
vcpu             3148 arch/mips/kvm/vz.c static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             3153 arch/mips/kvm/vz.c 	kvm_vz_acquire_htimer(vcpu);
vcpu             3155 arch/mips/kvm/vz.c 	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
vcpu             3157 arch/mips/kvm/vz.c 	kvm_vz_check_requests(vcpu, cpu);
vcpu             3158 arch/mips/kvm/vz.c 	kvm_vz_vcpu_load_tlb(vcpu, cpu);
vcpu             3159 arch/mips/kvm/vz.c 	kvm_vz_vcpu_load_wired(vcpu);
vcpu             3161 arch/mips/kvm/vz.c 	r = vcpu->arch.vcpu_run(run, vcpu);
vcpu             3163 arch/mips/kvm/vz.c 	kvm_vz_vcpu_save_wired(vcpu);
vcpu              149 arch/powerpc/include/asm/asm-prototypes.h void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
vcpu              150 arch/powerpc/include/asm/asm-prototypes.h void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
vcpu              163 arch/powerpc/include/asm/asm-prototypes.h void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
vcpu              164 arch/powerpc/include/asm/asm-prototypes.h void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
vcpu              166 arch/powerpc/include/asm/asm-prototypes.h static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
vcpu              168 arch/powerpc/include/asm/asm-prototypes.h static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
vcpu              174 arch/powerpc/include/asm/asm-prototypes.h void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
vcpu              175 arch/powerpc/include/asm/asm-prototypes.h void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
vcpu              177 arch/powerpc/include/asm/asm-prototypes.h int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
vcpu              179 arch/powerpc/include/asm/asm-prototypes.h long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
vcpu              180 arch/powerpc/include/asm/asm-prototypes.h long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
vcpu              535 arch/powerpc/include/asm/fsl_hcalls.h 	unsigned int vcpu, unsigned int *state)
vcpu              543 arch/powerpc/include/asm/fsl_hcalls.h 	r4 = vcpu;
vcpu              565 arch/powerpc/include/asm/fsl_hcalls.h static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
vcpu              573 arch/powerpc/include/asm/fsl_hcalls.h 	r4 = vcpu;
vcpu              590 arch/powerpc/include/asm/fsl_hcalls.h static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
vcpu              598 arch/powerpc/include/asm/fsl_hcalls.h 	r4 = vcpu;
vcpu              145 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
vcpu              146 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
vcpu              147 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
vcpu              148 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
vcpu              149 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
vcpu              150 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
vcpu              151 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
vcpu              152 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
vcpu              154 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
vcpu              155 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
vcpu              156 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
vcpu              157 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
vcpu              159 arch/powerpc/include/asm/kvm_book3s.h 			struct kvm_vcpu *vcpu, unsigned long addr,
vcpu              163 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              166 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
vcpu              167 arch/powerpc/include/asm/kvm_book3s.h extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
vcpu              169 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
vcpu              170 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
vcpu              171 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
vcpu              178 arch/powerpc/include/asm/kvm_book3s.h 			struct kvm_vcpu *vcpu,
vcpu              183 arch/powerpc/include/asm/kvm_book3s.h extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              185 arch/powerpc/include/asm/kvm_book3s.h extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              187 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              190 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              193 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              204 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
vcpu              228 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
vcpu              229 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
vcpu              230 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
vcpu              232 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
vcpu              233 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
vcpu              234 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
vcpu              236 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
vcpu              237 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              238 arch/powerpc/include/asm/kvm_book3s.h extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu              265 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
vcpu              267 arch/powerpc/include/asm/kvm_book3s.h extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
vcpu              268 arch/powerpc/include/asm/kvm_book3s.h extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
vcpu              269 arch/powerpc/include/asm/kvm_book3s.h extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);
vcpu              273 arch/powerpc/include/asm/kvm_book3s.h extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
vcpu              274 arch/powerpc/include/asm/kvm_book3s.h extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
vcpu              275 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
vcpu              279 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
vcpu              280 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
vcpu              283 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
vcpu              284 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
vcpu              285 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
vcpu              286 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
vcpu              288 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
vcpu              289 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
vcpu              290 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
vcpu              291 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
vcpu              297 arch/powerpc/include/asm/kvm_book3s.h long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
vcpu              298 arch/powerpc/include/asm/kvm_book3s.h long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
vcpu              301 arch/powerpc/include/asm/kvm_book3s.h long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
vcpu              302 arch/powerpc/include/asm/kvm_book3s.h long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
vcpu              303 arch/powerpc/include/asm/kvm_book3s.h int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
vcpu              305 arch/powerpc/include/asm/kvm_book3s.h void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
vcpu              306 arch/powerpc/include/asm/kvm_book3s.h void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
vcpu              308 arch/powerpc/include/asm/kvm_book3s.h long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              310 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
vcpu              314 arch/powerpc/include/asm/kvm_book3s.h static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
vcpu              316 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.book3s;
vcpu              328 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
vcpu              330 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.gpr[num] = val;
vcpu              333 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
vcpu              335 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.gpr[num];
vcpu              338 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
vcpu              340 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.ccr = val;
vcpu              343 arch/powerpc/include/asm/kvm_book3s.h static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
vcpu              345 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.ccr;
vcpu              348 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
vcpu              350 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.xer = val;
vcpu              353 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
vcpu              355 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.xer;
vcpu              358 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
vcpu              360 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.ctr = val;
vcpu              363 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
vcpu              365 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.ctr;
vcpu              368 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
vcpu              370 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.link = val;
vcpu              373 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
vcpu              375 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.link;
vcpu              378 arch/powerpc/include/asm/kvm_book3s.h static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
vcpu              380 arch/powerpc/include/asm/kvm_book3s.h 	vcpu->arch.regs.nip = val;
vcpu              383 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
vcpu              385 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.regs.nip;
vcpu              388 arch/powerpc/include/asm/kvm_book3s.h static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
vcpu              389 arch/powerpc/include/asm/kvm_book3s.h static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
vcpu              391 arch/powerpc/include/asm/kvm_book3s.h 	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
vcpu              394 arch/powerpc/include/asm/kvm_book3s.h static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
vcpu              396 arch/powerpc/include/asm/kvm_book3s.h 	return vcpu->arch.fault_dar;
vcpu              405 arch/powerpc/include/asm/kvm_book3s.h static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
vcpu              408 arch/powerpc/include/asm/kvm_book3s.h 	return !is_kvmppc_hv_enabled(vcpu->kvm);
vcpu              411 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
vcpu              412 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
vcpu               12 arch/powerpc/include/asm/kvm_book3s_32.h static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
vcpu               14 arch/powerpc/include/asm/kvm_book3s_32.h 	return vcpu->arch.shadow_vcpu;
vcpu              124 arch/powerpc/include/asm/kvm_book3s_64.h static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
vcpu              143 arch/powerpc/include/asm/kvm_book3s_64.h static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
vcpu              147 arch/powerpc/include/asm/kvm_book3s_64.h 	if (vcpu->arch.nested)
vcpu              148 arch/powerpc/include/asm/kvm_book3s_64.h 		radix = vcpu->arch.nested->radix;
vcpu              150 arch/powerpc/include/asm/kvm_book3s_64.h 		radix = kvm_is_radix(vcpu->kvm);
vcpu              588 arch/powerpc/include/asm/kvm_book3s_64.h static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
vcpu              590 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
vcpu              591 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu              592 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.regs.link  = vcpu->arch.lr_tm;
vcpu              593 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu              594 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.amr = vcpu->arch.amr_tm;
vcpu              595 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.ppr = vcpu->arch.ppr_tm;
vcpu              596 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.dscr = vcpu->arch.dscr_tm;
vcpu              597 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.tar = vcpu->arch.tar_tm;
vcpu              598 arch/powerpc/include/asm/kvm_book3s_64.h 	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
vcpu              599 arch/powerpc/include/asm/kvm_book3s_64.h 	       sizeof(vcpu->arch.regs.gpr));
vcpu              600 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.fp  = vcpu->arch.fp_tm;
vcpu              601 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.vr  = vcpu->arch.vr_tm;
vcpu              602 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
vcpu              605 arch/powerpc/include/asm/kvm_book3s_64.h static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
vcpu              607 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
vcpu              608 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu              609 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.lr_tm  = vcpu->arch.regs.link;
vcpu              610 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu              611 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.amr_tm = vcpu->arch.amr;
vcpu              612 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.ppr_tm = vcpu->arch.ppr;
vcpu              613 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.dscr_tm = vcpu->arch.dscr;
vcpu              614 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.tar_tm = vcpu->arch.tar;
vcpu              615 arch/powerpc/include/asm/kvm_book3s_64.h 	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
vcpu              616 arch/powerpc/include/asm/kvm_book3s_64.h 	       sizeof(vcpu->arch.regs.gpr));
vcpu              617 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.fp_tm  = vcpu->arch.fp;
vcpu              618 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.vr_tm  = vcpu->arch.vr;
vcpu              619 arch/powerpc/include/asm/kvm_book3s_64.h 	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
vcpu               26 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
vcpu               28 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.gpr[num] = val;
vcpu               31 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
vcpu               33 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.gpr[num];
vcpu               36 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
vcpu               38 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.ccr = val;
vcpu               41 arch/powerpc/include/asm/kvm_booke.h static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
vcpu               43 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.ccr;
vcpu               46 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
vcpu               48 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.xer = val;
vcpu               51 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
vcpu               53 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.xer;
vcpu               56 arch/powerpc/include/asm/kvm_booke.h static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
vcpu               62 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
vcpu               64 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.ctr = val;
vcpu               67 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
vcpu               69 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.ctr;
vcpu               72 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
vcpu               74 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.link = val;
vcpu               77 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
vcpu               79 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.link;
vcpu               82 arch/powerpc/include/asm/kvm_booke.h static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
vcpu               84 arch/powerpc/include/asm/kvm_booke.h 	vcpu->arch.regs.nip = val;
vcpu               87 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
vcpu               89 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.regs.nip;
vcpu               92 arch/powerpc/include/asm/kvm_booke.h static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
vcpu               94 arch/powerpc/include/asm/kvm_booke.h 	return vcpu->arch.fault_dear;
vcpu               97 arch/powerpc/include/asm/kvm_booke.h static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
vcpu              393 arch/powerpc/include/asm/kvm_host.h 	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
vcpu              394 arch/powerpc/include/asm/kvm_host.h 	u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
vcpu              395 arch/powerpc/include/asm/kvm_host.h 	u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
vcpu              396 arch/powerpc/include/asm/kvm_host.h 	int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
vcpu              397 arch/powerpc/include/asm/kvm_host.h 	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
vcpu              398 arch/powerpc/include/asm/kvm_host.h 	void (*slbia)(struct kvm_vcpu *vcpu);
vcpu              400 arch/powerpc/include/asm/kvm_host.h 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
vcpu              401 arch/powerpc/include/asm/kvm_host.h 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
vcpu              402 arch/powerpc/include/asm/kvm_host.h 	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              404 arch/powerpc/include/asm/kvm_host.h 	void (*reset_msr)(struct kvm_vcpu *vcpu);
vcpu              405 arch/powerpc/include/asm/kvm_host.h 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
vcpu              406 arch/powerpc/include/asm/kvm_host.h 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
vcpu              407 arch/powerpc/include/asm/kvm_host.h 	u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
vcpu              408 arch/powerpc/include/asm/kvm_host.h 	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
vcpu              831 arch/powerpc/include/asm/kvm_host.h #define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
vcpu              832 arch/powerpc/include/asm/kvm_host.h #define VCPU_VSX_FPR(vcpu, i, j)	((vcpu)->arch.fp.fpr[i][j])
vcpu              833 arch/powerpc/include/asm/kvm_host.h #define VCPU_VSX_VR(vcpu, i)		((vcpu)->arch.vr.vr[i])
vcpu              860 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
vcpu              862 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
vcpu              863 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
vcpu              864 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
vcpu               61 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
vcpu               62 arch/powerpc/include/asm/kvm_ppc.h extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
vcpu               65 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
vcpu               66 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               69 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               72 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               75 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               77 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               79 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               82 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               86 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
vcpu               89 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu               91 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu               94 arch/powerpc/include/asm/kvm_ppc.h                                       struct kvm_vcpu *vcpu);
vcpu               95 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
vcpu               96 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu               97 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
vcpu               98 arch/powerpc/include/asm/kvm_ppc.h extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
vcpu               99 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
vcpu              100 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
vcpu              101 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
vcpu              102 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
vcpu              106 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
vcpu              108 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
vcpu              109 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
vcpu              110 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
vcpu              111 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
vcpu              112 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
vcpu              113 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
vcpu              114 arch/powerpc/include/asm/kvm_ppc.h extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
vcpu              116 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
vcpu              117 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
vcpu              118 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
vcpu              124 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
vcpu              125 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
vcpu              127 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
vcpu              130 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
vcpu              131 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
vcpu              133 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
vcpu              134 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
vcpu              135 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
vcpu              136 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
vcpu              137 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
vcpu              138 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
vcpu              139 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
vcpu              140 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
vcpu              141 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
vcpu              142 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
vcpu              144 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
vcpu              145 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
vcpu              147 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
vcpu              150 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
vcpu              151 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
vcpu              153 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
vcpu              154 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
vcpu              159 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
vcpu              160 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
vcpu              161 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
vcpu              170 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
vcpu              172 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
vcpu              189 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              191 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
vcpu              194 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
vcpu              197 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              225 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);
vcpu              233 arch/powerpc/include/asm/kvm_ppc.h int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
vcpu              236 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
vcpu              246 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
vcpu              247 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
vcpu              266 arch/powerpc/include/asm/kvm_ppc.h 	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              267 arch/powerpc/include/asm/kvm_ppc.h 	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              268 arch/powerpc/include/asm/kvm_ppc.h 	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
vcpu              270 arch/powerpc/include/asm/kvm_ppc.h 	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
vcpu              272 arch/powerpc/include/asm/kvm_ppc.h 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
vcpu              273 arch/powerpc/include/asm/kvm_ppc.h 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
vcpu              274 arch/powerpc/include/asm/kvm_ppc.h 	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
vcpu              275 arch/powerpc/include/asm/kvm_ppc.h 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
vcpu              277 arch/powerpc/include/asm/kvm_ppc.h 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
vcpu              278 arch/powerpc/include/asm/kvm_ppc.h 	int (*check_requests)(struct kvm_vcpu *vcpu);
vcpu              294 arch/powerpc/include/asm/kvm_ppc.h 	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
vcpu              302 arch/powerpc/include/asm/kvm_ppc.h 	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              304 arch/powerpc/include/asm/kvm_ppc.h 	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
vcpu              305 arch/powerpc/include/asm/kvm_ppc.h 	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
vcpu              306 arch/powerpc/include/asm/kvm_ppc.h 	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
vcpu              318 arch/powerpc/include/asm/kvm_ppc.h 	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
vcpu              320 arch/powerpc/include/asm/kvm_ppc.h 	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
vcpu              322 arch/powerpc/include/asm/kvm_ppc.h 	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
vcpu              329 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
vcpu              337 arch/powerpc/include/asm/kvm_ppc.h 	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
vcpu              338 arch/powerpc/include/asm/kvm_ppc.h 		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
vcpu              342 arch/powerpc/include/asm/kvm_ppc.h 		fetched_inst = kvmppc_need_byteswap(vcpu) ?
vcpu              343 arch/powerpc/include/asm/kvm_ppc.h 				swab32(vcpu->arch.last_inst) :
vcpu              344 arch/powerpc/include/asm/kvm_ppc.h 				vcpu->arch.last_inst;
vcpu              346 arch/powerpc/include/asm/kvm_ppc.h 		fetched_inst = vcpu->arch.last_inst;
vcpu              416 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              417 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              419 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              420 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              422 arch/powerpc/include/asm/kvm_ppc.h int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
vcpu              423 arch/powerpc/include/asm/kvm_ppc.h int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
vcpu              424 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
vcpu              425 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
vcpu              427 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
vcpu              551 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
vcpu              553 arch/powerpc/include/asm/kvm_ppc.h 	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
vcpu              586 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
vcpu              588 arch/powerpc/include/asm/kvm_ppc.h 	kvm_vcpu_kick(vcpu);
vcpu              596 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
vcpu              598 arch/powerpc/include/asm/kvm_ppc.h 	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
vcpu              612 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
vcpu              613 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
vcpu              614 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
vcpu              615 arch/powerpc/include/asm/kvm_ppc.h extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
vcpu              616 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
vcpu              618 arch/powerpc/include/asm/kvm_ppc.h 			struct kvm_vcpu *vcpu, u32 cpu);
vcpu              624 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
vcpu              640 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
vcpu              642 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
vcpu              644 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
vcpu              645 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
vcpu              668 arch/powerpc/include/asm/kvm_ppc.h 				    struct kvm_vcpu *vcpu, u32 cpu);
vcpu              669 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
vcpu              674 arch/powerpc/include/asm/kvm_ppc.h extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
vcpu              675 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
vcpu              679 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
vcpu              681 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
vcpu              683 arch/powerpc/include/asm/kvm_ppc.h 	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
vcpu              687 arch/powerpc/include/asm/kvm_ppc.h 					   struct kvm_vcpu *vcpu, u32 cpu);
vcpu              688 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
vcpu              691 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
vcpu              693 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
vcpu              708 arch/powerpc/include/asm/kvm_ppc.h 					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
vcpu              709 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
vcpu              714 arch/powerpc/include/asm/kvm_ppc.h static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
vcpu              715 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
vcpu              719 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
vcpu              721 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
vcpu              724 arch/powerpc/include/asm/kvm_ppc.h 			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
vcpu              725 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
vcpu              728 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
vcpu              731 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
vcpu              753 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              755 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
vcpu              758 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
vcpu              761 arch/powerpc/include/asm/kvm_ppc.h long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
vcpu              763 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_random(struct kvm_vcpu *vcpu);
vcpu              765 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
vcpu              769 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              771 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              773 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
vcpu              774 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              777 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              779 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              781 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              783 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              785 arch/powerpc/include/asm/kvm_ppc.h long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu              787 arch/powerpc/include/asm/kvm_ppc.h unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
vcpu              788 arch/powerpc/include/asm/kvm_ppc.h unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
vcpu              789 arch/powerpc/include/asm/kvm_ppc.h unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
vcpu              790 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              792 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
vcpu              793 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
vcpu              794 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
vcpu              818 arch/powerpc/include/asm/kvm_ppc.h 	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
vcpu              823 arch/powerpc/include/asm/kvm_ppc.h static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
vcpu              828 arch/powerpc/include/asm/kvm_ppc.h 	return vcpu->arch.epr;
vcpu              834 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
vcpu              839 arch/powerpc/include/asm/kvm_ppc.h 	vcpu->arch.epr = epr;
vcpu              845 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
vcpu              846 arch/powerpc/include/asm/kvm_ppc.h int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
vcpu              848 arch/powerpc/include/asm/kvm_ppc.h void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);
vcpu              852 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
vcpu              857 arch/powerpc/include/asm/kvm_ppc.h 		struct kvm_vcpu *vcpu, u32 cpu)
vcpu              863 arch/powerpc/include/asm/kvm_ppc.h 		struct kvm_vcpu *vcpu)
vcpu              869 arch/powerpc/include/asm/kvm_ppc.h int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu              871 arch/powerpc/include/asm/kvm_ppc.h int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
vcpu              901 arch/powerpc/include/asm/kvm_ppc.h static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
vcpu              905 arch/powerpc/include/asm/kvm_ppc.h 	return vcpu->arch.shared_big_endian;
vcpu              915 arch/powerpc/include/asm/kvm_ppc.h static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
vcpu              921 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
vcpu              927 arch/powerpc/include/asm/kvm_ppc.h static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
vcpu              929 arch/powerpc/include/asm/kvm_ppc.h 	if (kvmppc_shared_big_endian(vcpu))				\
vcpu              930 arch/powerpc/include/asm/kvm_ppc.h 	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
vcpu              932 arch/powerpc/include/asm/kvm_ppc.h 	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
vcpu              936 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
vcpu              938 arch/powerpc/include/asm/kvm_ppc.h 	if (kvmppc_shared_big_endian(vcpu))				\
vcpu              939 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
vcpu              941 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
vcpu              974 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
vcpu              976 arch/powerpc/include/asm/kvm_ppc.h 	if (kvmppc_shared_big_endian(vcpu))
vcpu              977 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->msr = cpu_to_be64(val);
vcpu              979 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->msr = cpu_to_le64(val);
vcpu              988 arch/powerpc/include/asm/kvm_ppc.h static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
vcpu              990 arch/powerpc/include/asm/kvm_ppc.h 	if (kvmppc_shared_big_endian(vcpu))
vcpu              991 arch/powerpc/include/asm/kvm_ppc.h 	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
vcpu              993 arch/powerpc/include/asm/kvm_ppc.h 	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
vcpu              996 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
vcpu              998 arch/powerpc/include/asm/kvm_ppc.h 	if (kvmppc_shared_big_endian(vcpu))
vcpu              999 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
vcpu             1001 arch/powerpc/include/asm/kvm_ppc.h 	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
vcpu             1026 arch/powerpc/include/asm/kvm_ppc.h static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
vcpu             1031 arch/powerpc/include/asm/kvm_ppc.h 	ea = kvmppc_get_gpr(vcpu, rb);
vcpu             1033 arch/powerpc/include/asm/kvm_ppc.h 		ea += kvmppc_get_gpr(vcpu, ra);
vcpu             1041 arch/powerpc/include/asm/kvm_ppc.h 	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
vcpu               77 arch/powerpc/kvm/book3s.c void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
vcpu               79 arch/powerpc/kvm/book3s.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
vcpu               80 arch/powerpc/kvm/book3s.c 		ulong pc = kvmppc_get_pc(vcpu);
vcpu               81 arch/powerpc/kvm/book3s.c 		ulong lr = kvmppc_get_lr(vcpu);
vcpu               83 arch/powerpc/kvm/book3s.c 			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
vcpu               85 arch/powerpc/kvm/book3s.c 			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
vcpu               86 arch/powerpc/kvm/book3s.c 		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
vcpu               91 arch/powerpc/kvm/book3s.c static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
vcpu               93 arch/powerpc/kvm/book3s.c 	if (!is_kvmppc_hv_enabled(vcpu->kvm))
vcpu               94 arch/powerpc/kvm/book3s.c 		return to_book3s(vcpu)->hior;
vcpu               98 arch/powerpc/kvm/book3s.c static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
vcpu              101 arch/powerpc/kvm/book3s.c 	if (is_kvmppc_hv_enabled(vcpu->kvm))
vcpu              104 arch/powerpc/kvm/book3s.c 		kvmppc_set_int_pending(vcpu, 1);
vcpu              106 arch/powerpc/kvm/book3s.c 		kvmppc_set_int_pending(vcpu, 0);
vcpu              109 arch/powerpc/kvm/book3s.c static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
vcpu              115 arch/powerpc/kvm/book3s.c 	if (is_kvmppc_hv_enabled(vcpu->kvm))
vcpu              118 arch/powerpc/kvm/book3s.c 	crit_raw = kvmppc_get_critical(vcpu);
vcpu              119 arch/powerpc/kvm/book3s.c 	crit_r1 = kvmppc_get_gpr(vcpu, 1);
vcpu              122 arch/powerpc/kvm/book3s.c 	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
vcpu              130 arch/powerpc/kvm/book3s.c 	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
vcpu              135 arch/powerpc/kvm/book3s.c void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
vcpu              137 arch/powerpc/kvm/book3s.c 	kvmppc_unfixup_split_real(vcpu);
vcpu              138 arch/powerpc/kvm/book3s.c 	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
vcpu              139 arch/powerpc/kvm/book3s.c 	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
vcpu              140 arch/powerpc/kvm/book3s.c 	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu              141 arch/powerpc/kvm/book3s.c 	vcpu->arch.mmu.reset_msr(vcpu);
vcpu              171 arch/powerpc/kvm/book3s.c void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
vcpu              174 arch/powerpc/kvm/book3s.c 	unsigned long old_pending = vcpu->arch.pending_exceptions;
vcpu              177 arch/powerpc/kvm/book3s.c 		  &vcpu->arch.pending_exceptions);
vcpu              179 arch/powerpc/kvm/book3s.c 	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
vcpu              183 arch/powerpc/kvm/book3s.c void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
vcpu              185 arch/powerpc/kvm/book3s.c 	vcpu->stat.queue_intr++;
vcpu              188 arch/powerpc/kvm/book3s.c 		&vcpu->arch.pending_exceptions);
vcpu              195 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
vcpu              198 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
vcpu              202 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
vcpu              205 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
vcpu              209 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
vcpu              212 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
vcpu              215 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
vcpu              218 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
vcpu              221 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
vcpu              224 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
vcpu              227 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
vcpu              229 arch/powerpc/kvm/book3s.c 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
vcpu              233 arch/powerpc/kvm/book3s.c int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
vcpu              235 arch/powerpc/kvm/book3s.c 	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
vcpu              239 arch/powerpc/kvm/book3s.c void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
vcpu              241 arch/powerpc/kvm/book3s.c 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
vcpu              245 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
vcpu              269 arch/powerpc/kvm/book3s.c 		vcpu->arch.external_oneshot = 1;
vcpu              271 arch/powerpc/kvm/book3s.c 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
vcpu              274 arch/powerpc/kvm/book3s.c void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
vcpu              276 arch/powerpc/kvm/book3s.c 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
vcpu              279 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
vcpu              282 arch/powerpc/kvm/book3s.c 	kvmppc_set_dar(vcpu, dar);
vcpu              283 arch/powerpc/kvm/book3s.c 	kvmppc_set_dsisr(vcpu, flags);
vcpu              284 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
vcpu              288 arch/powerpc/kvm/book3s.c void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
vcpu              290 arch/powerpc/kvm/book3s.c 	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
vcpu              294 arch/powerpc/kvm/book3s.c static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu              299 arch/powerpc/kvm/book3s.c 	bool crit = kvmppc_critical_section(vcpu);
vcpu              303 arch/powerpc/kvm/book3s.c 		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vcpu              307 arch/powerpc/kvm/book3s.c 		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vcpu              366 arch/powerpc/kvm/book3s.c 		kvmppc_inject_interrupt(vcpu, vec, 0);
vcpu              374 arch/powerpc/kvm/book3s.c static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
vcpu              386 arch/powerpc/kvm/book3s.c 			if (vcpu->arch.external_oneshot) {
vcpu              387 arch/powerpc/kvm/book3s.c 				vcpu->arch.external_oneshot = 0;
vcpu              396 arch/powerpc/kvm/book3s.c int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
vcpu              398 arch/powerpc/kvm/book3s.c 	unsigned long *pending = &vcpu->arch.pending_exceptions;
vcpu              399 arch/powerpc/kvm/book3s.c 	unsigned long old_pending = vcpu->arch.pending_exceptions;
vcpu              403 arch/powerpc/kvm/book3s.c 	if (vcpu->arch.pending_exceptions)
vcpu              404 arch/powerpc/kvm/book3s.c 		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
vcpu              408 arch/powerpc/kvm/book3s.c 		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
vcpu              409 arch/powerpc/kvm/book3s.c 		    clear_irqprio(vcpu, priority)) {
vcpu              410 arch/powerpc/kvm/book3s.c 			clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              420 arch/powerpc/kvm/book3s.c 	kvmppc_update_int_pending(vcpu, *pending, old_pending);
vcpu              426 arch/powerpc/kvm/book3s.c kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
vcpu              429 arch/powerpc/kvm/book3s.c 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
vcpu              432 arch/powerpc/kvm/book3s.c 	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
vcpu              438 arch/powerpc/kvm/book3s.c 		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
vcpu              448 arch/powerpc/kvm/book3s.c 	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
vcpu              452 arch/powerpc/kvm/book3s.c int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
vcpu              457 arch/powerpc/kvm/book3s.c 	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
vcpu              461 arch/powerpc/kvm/book3s.c 		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
vcpu              471 arch/powerpc/kvm/book3s.c 		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
vcpu              473 arch/powerpc/kvm/book3s.c 			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
vcpu              482 arch/powerpc/kvm/book3s.c int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
vcpu              485 arch/powerpc/kvm/book3s.c 	ulong pc = kvmppc_get_pc(vcpu);
vcpu              491 arch/powerpc/kvm/book3s.c 	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
vcpu              499 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu              504 arch/powerpc/kvm/book3s.c int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              509 arch/powerpc/kvm/book3s.c void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              513 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu              518 arch/powerpc/kvm/book3s.c 	vcpu_load(vcpu);
vcpu              519 arch/powerpc/kvm/book3s.c 	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
vcpu              520 arch/powerpc/kvm/book3s.c 	vcpu_put(vcpu);
vcpu              525 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu              530 arch/powerpc/kvm/book3s.c 	vcpu_load(vcpu);
vcpu              531 arch/powerpc/kvm/book3s.c 	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
vcpu              532 arch/powerpc/kvm/book3s.c 	vcpu_put(vcpu);
vcpu              537 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              541 arch/powerpc/kvm/book3s.c 	regs->pc = kvmppc_get_pc(vcpu);
vcpu              542 arch/powerpc/kvm/book3s.c 	regs->cr = kvmppc_get_cr(vcpu);
vcpu              543 arch/powerpc/kvm/book3s.c 	regs->ctr = kvmppc_get_ctr(vcpu);
vcpu              544 arch/powerpc/kvm/book3s.c 	regs->lr = kvmppc_get_lr(vcpu);
vcpu              545 arch/powerpc/kvm/book3s.c 	regs->xer = kvmppc_get_xer(vcpu);
vcpu              546 arch/powerpc/kvm/book3s.c 	regs->msr = kvmppc_get_msr(vcpu);
vcpu              547 arch/powerpc/kvm/book3s.c 	regs->srr0 = kvmppc_get_srr0(vcpu);
vcpu              548 arch/powerpc/kvm/book3s.c 	regs->srr1 = kvmppc_get_srr1(vcpu);
vcpu              549 arch/powerpc/kvm/book3s.c 	regs->pid = vcpu->arch.pid;
vcpu              550 arch/powerpc/kvm/book3s.c 	regs->sprg0 = kvmppc_get_sprg0(vcpu);
vcpu              551 arch/powerpc/kvm/book3s.c 	regs->sprg1 = kvmppc_get_sprg1(vcpu);
vcpu              552 arch/powerpc/kvm/book3s.c 	regs->sprg2 = kvmppc_get_sprg2(vcpu);
vcpu              553 arch/powerpc/kvm/book3s.c 	regs->sprg3 = kvmppc_get_sprg3(vcpu);
vcpu              554 arch/powerpc/kvm/book3s.c 	regs->sprg4 = kvmppc_get_sprg4(vcpu);
vcpu              555 arch/powerpc/kvm/book3s.c 	regs->sprg5 = kvmppc_get_sprg5(vcpu);
vcpu              556 arch/powerpc/kvm/book3s.c 	regs->sprg6 = kvmppc_get_sprg6(vcpu);
vcpu              557 arch/powerpc/kvm/book3s.c 	regs->sprg7 = kvmppc_get_sprg7(vcpu);
vcpu              560 arch/powerpc/kvm/book3s.c 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
vcpu              565 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu              569 arch/powerpc/kvm/book3s.c 	kvmppc_set_pc(vcpu, regs->pc);
vcpu              570 arch/powerpc/kvm/book3s.c 	kvmppc_set_cr(vcpu, regs->cr);
vcpu              571 arch/powerpc/kvm/book3s.c 	kvmppc_set_ctr(vcpu, regs->ctr);
vcpu              572 arch/powerpc/kvm/book3s.c 	kvmppc_set_lr(vcpu, regs->lr);
vcpu              573 arch/powerpc/kvm/book3s.c 	kvmppc_set_xer(vcpu, regs->xer);
vcpu              574 arch/powerpc/kvm/book3s.c 	kvmppc_set_msr(vcpu, regs->msr);
vcpu              575 arch/powerpc/kvm/book3s.c 	kvmppc_set_srr0(vcpu, regs->srr0);
vcpu              576 arch/powerpc/kvm/book3s.c 	kvmppc_set_srr1(vcpu, regs->srr1);
vcpu              577 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg0(vcpu, regs->sprg0);
vcpu              578 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg1(vcpu, regs->sprg1);
vcpu              579 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg2(vcpu, regs->sprg2);
vcpu              580 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg3(vcpu, regs->sprg3);
vcpu              581 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg4(vcpu, regs->sprg4);
vcpu              582 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg5(vcpu, regs->sprg5);
vcpu              583 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg6(vcpu, regs->sprg6);
vcpu              584 arch/powerpc/kvm/book3s.c 	kvmppc_set_sprg7(vcpu, regs->sprg7);
vcpu              587 arch/powerpc/kvm/book3s.c 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
vcpu              592 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              597 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu              602 arch/powerpc/kvm/book3s.c int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
vcpu              608 arch/powerpc/kvm/book3s.c 	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
vcpu              613 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
vcpu              616 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
vcpu              620 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
vcpu              623 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
vcpu              629 arch/powerpc/kvm/book3s.c 				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
vcpu              630 arch/powerpc/kvm/book3s.c 				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
vcpu              641 arch/powerpc/kvm/book3s.c 			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
vcpu              646 arch/powerpc/kvm/book3s.c 				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
vcpu              648 arch/powerpc/kvm/book3s.c 				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
vcpu              653 arch/powerpc/kvm/book3s.c 			if (!vcpu->arch.xive_vcpu) {
vcpu              658 arch/powerpc/kvm/book3s.c 				r = kvmppc_xive_native_get_vp(vcpu, val);
vcpu              664 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.fscr);
vcpu              667 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.tar);
vcpu              670 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.ebbhr);
vcpu              673 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.ebbrr);
vcpu              676 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.bescr);
vcpu              679 arch/powerpc/kvm/book3s.c 			*val = get_reg_val(id, vcpu->arch.ic);
vcpu              690 arch/powerpc/kvm/book3s.c int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
vcpu              696 arch/powerpc/kvm/book3s.c 	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
vcpu              701 arch/powerpc/kvm/book3s.c 			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
vcpu              704 arch/powerpc/kvm/book3s.c 			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
vcpu              708 arch/powerpc/kvm/book3s.c 			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
vcpu              711 arch/powerpc/kvm/book3s.c 			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
vcpu              717 arch/powerpc/kvm/book3s.c 				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
vcpu              718 arch/powerpc/kvm/book3s.c 				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
vcpu              726 arch/powerpc/kvm/book3s.c 			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
vcpu              731 arch/powerpc/kvm/book3s.c 				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
vcpu              733 arch/powerpc/kvm/book3s.c 				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
vcpu              738 arch/powerpc/kvm/book3s.c 			if (!vcpu->arch.xive_vcpu) {
vcpu              743 arch/powerpc/kvm/book3s.c 				r = kvmppc_xive_native_set_vp(vcpu, val);
vcpu              749 arch/powerpc/kvm/book3s.c 			vcpu->arch.fscr = set_reg_val(id, *val);
vcpu              752 arch/powerpc/kvm/book3s.c 			vcpu->arch.tar = set_reg_val(id, *val);
vcpu              755 arch/powerpc/kvm/book3s.c 			vcpu->arch.ebbhr = set_reg_val(id, *val);
vcpu              758 arch/powerpc/kvm/book3s.c 			vcpu->arch.ebbrr = set_reg_val(id, *val);
vcpu              761 arch/powerpc/kvm/book3s.c 			vcpu->arch.bescr = set_reg_val(id, *val);
vcpu              764 arch/powerpc/kvm/book3s.c 			vcpu->arch.ic = set_reg_val(id, *val);
vcpu              775 arch/powerpc/kvm/book3s.c void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu              777 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
vcpu              780 arch/powerpc/kvm/book3s.c void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
vcpu              782 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
vcpu              785 arch/powerpc/kvm/book3s.c void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
vcpu              787 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
vcpu              791 arch/powerpc/kvm/book3s.c int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu              793 arch/powerpc/kvm/book3s.c 	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
vcpu              796 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu              802 arch/powerpc/kvm/book3s.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu              805 arch/powerpc/kvm/book3s.c 	vcpu_load(vcpu);
vcpu              806 arch/powerpc/kvm/book3s.c 	vcpu->guest_debug = dbg->control;
vcpu              807 arch/powerpc/kvm/book3s.c 	vcpu_put(vcpu);
vcpu              811 arch/powerpc/kvm/book3s.c void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
vcpu              813 arch/powerpc/kvm/book3s.c 	kvmppc_core_queue_dec(vcpu);
vcpu              814 arch/powerpc/kvm/book3s.c 	kvm_vcpu_kick(vcpu);
vcpu              822 arch/powerpc/kvm/book3s.c void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
vcpu              824 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
vcpu              827 arch/powerpc/kvm/book3s.c int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
vcpu              829 arch/powerpc/kvm/book3s.c 	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
vcpu              891 arch/powerpc/kvm/book3s.c void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
vcpu              893 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
vcpu              929 arch/powerpc/kvm/book3s.c int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
vcpu              931 arch/powerpc/kvm/book3s.c 	unsigned long size = kvmppc_get_gpr(vcpu, 4);
vcpu              932 arch/powerpc/kvm/book3s.c 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
vcpu              940 arch/powerpc/kvm/book3s.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              941 arch/powerpc/kvm/book3s.c 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
vcpu              942 arch/powerpc/kvm/book3s.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu              948 arch/powerpc/kvm/book3s.c 		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
vcpu              952 arch/powerpc/kvm/book3s.c 		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
vcpu              956 arch/powerpc/kvm/book3s.c 		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
vcpu              960 arch/powerpc/kvm/book3s.c 		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
vcpu              971 arch/powerpc/kvm/book3s.c int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
vcpu              973 arch/powerpc/kvm/book3s.c 	unsigned long size = kvmppc_get_gpr(vcpu, 4);
vcpu              974 arch/powerpc/kvm/book3s.c 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
vcpu              975 arch/powerpc/kvm/book3s.c 	unsigned long val = kvmppc_get_gpr(vcpu, 6);
vcpu             1001 arch/powerpc/kvm/book3s.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1002 arch/powerpc/kvm/book3s.c 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
vcpu             1003 arch/powerpc/kvm/book3s.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu               19 arch/powerpc/kvm/book3s.h extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
vcpu               20 arch/powerpc/kvm/book3s.h extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               22 arch/powerpc/kvm/book3s.h extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
vcpu               24 arch/powerpc/kvm/book3s.h extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
vcpu               30 arch/powerpc/kvm/book3s.h extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
vcpu               32 arch/powerpc/kvm/book3s.h static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
vcpu               40 arch/powerpc/kvm/book3s_32_mmu.c static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
vcpu               43 arch/powerpc/kvm/book3s_32_mmu.c 	return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
vcpu               69 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu               72 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
vcpu               75 arch/powerpc/kvm/book3s_32_mmu.c static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
vcpu               77 arch/powerpc/kvm/book3s_32_mmu.c 	return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
vcpu               80 arch/powerpc/kvm/book3s_32_mmu.c static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu               86 arch/powerpc/kvm/book3s_32_mmu.c 	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
vcpu               89 arch/powerpc/kvm/book3s_32_mmu.c 	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
vcpu               93 arch/powerpc/kvm/book3s_32_mmu.c static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
vcpu               95 arch/powerpc/kvm/book3s_32_mmu.c 	kvmppc_set_msr(vcpu, 0);
vcpu               98 arch/powerpc/kvm/book3s_32_mmu.c static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
vcpu              102 arch/powerpc/kvm/book3s_32_mmu.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              117 arch/powerpc/kvm/book3s_32_mmu.c 		kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,
vcpu              120 arch/powerpc/kvm/book3s_32_mmu.c 	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
vcpu              132 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              136 arch/powerpc/kvm/book3s_32_mmu.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              146 arch/powerpc/kvm/book3s_32_mmu.c 		if (kvmppc_get_msr(vcpu) & MSR_PR) {
vcpu              154 arch/powerpc/kvm/book3s_32_mmu.c 		if (check_debug_ip(vcpu))
vcpu              162 arch/powerpc/kvm/book3s_32_mmu.c 			kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
vcpu              187 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              199 arch/powerpc/kvm/book3s_32_mmu.c 	sre = find_sr(vcpu, eaddr);
vcpu              204 arch/powerpc/kvm/book3s_32_mmu.c 	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
vcpu              206 arch/powerpc/kvm/book3s_32_mmu.c 	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
vcpu              229 arch/powerpc/kvm/book3s_32_mmu.c 			if ((sr_kp(sre) &&  (kvmppc_get_msr(vcpu) & MSR_PR)) ||
vcpu              230 arch/powerpc/kvm/book3s_32_mmu.c 			    (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR)))
vcpu              282 arch/powerpc/kvm/book3s_32_mmu.c 	if (check_debug_ip(vcpu)) {
vcpu              284 arch/powerpc/kvm/book3s_32_mmu.c 			    to_book3s(vcpu)->sdr1, ptegp);
vcpu              295 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              300 arch/powerpc/kvm/book3s_32_mmu.c 	ulong mp_ea = vcpu->arch.magic_page_ea;
vcpu              308 arch/powerpc/kvm/book3s_32_mmu.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              309 arch/powerpc/kvm/book3s_32_mmu.c 		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
vcpu              310 arch/powerpc/kvm/book3s_32_mmu.c 		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
vcpu              319 arch/powerpc/kvm/book3s_32_mmu.c 	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
vcpu              321 arch/powerpc/kvm/book3s_32_mmu.c 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
vcpu              324 arch/powerpc/kvm/book3s_32_mmu.c 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
vcpu              331 arch/powerpc/kvm/book3s_32_mmu.c static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
vcpu              333 arch/powerpc/kvm/book3s_32_mmu.c 	return kvmppc_get_sr(vcpu, srnum);
vcpu              336 arch/powerpc/kvm/book3s_32_mmu.c static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
vcpu              339 arch/powerpc/kvm/book3s_32_mmu.c 	kvmppc_set_sr(vcpu, srnum, value);
vcpu              340 arch/powerpc/kvm/book3s_32_mmu.c 	kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
vcpu              343 arch/powerpc/kvm/book3s_32_mmu.c static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
vcpu              349 arch/powerpc/kvm/book3s_32_mmu.c 	kvm_for_each_vcpu(i, v, vcpu->kvm)
vcpu              353 arch/powerpc/kvm/book3s_32_mmu.c static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
vcpu              359 arch/powerpc/kvm/book3s_32_mmu.c 	u64 msr = kvmppc_get_msr(vcpu);
vcpu              362 arch/powerpc/kvm/book3s_32_mmu.c 		sr = find_sr(vcpu, ea);
vcpu              396 arch/powerpc/kvm/book3s_32_mmu.c static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu)
vcpu              402 arch/powerpc/kvm/book3s_32_mmu.c void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
vcpu              404 arch/powerpc/kvm/book3s_32_mmu.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
vcpu               49 arch/powerpc/kvm/book3s_32_mmu_host.c void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
vcpu               66 arch/powerpc/kvm/book3s_32_mmu_host.c static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu               79 arch/powerpc/kvm/book3s_32_mmu_host.c static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu               84 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu               87 arch/powerpc/kvm/book3s_32_mmu_host.c 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
vcpu               88 arch/powerpc/kvm/book3s_32_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
vcpu               95 arch/powerpc/kvm/book3s_32_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
vcpu              106 arch/powerpc/kvm/book3s_32_mmu_host.c static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
vcpu              130 arch/powerpc/kvm/book3s_32_mmu_host.c int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
vcpu              148 arch/powerpc/kvm/book3s_32_mmu_host.c 	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
vcpu              158 arch/powerpc/kvm/book3s_32_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
vcpu              159 arch/powerpc/kvm/book3s_32_mmu_host.c 	map = find_sid_vsid(vcpu, vsid);
vcpu              161 arch/powerpc/kvm/book3s_32_mmu_host.c 		kvmppc_mmu_map_segment(vcpu, eaddr);
vcpu              162 arch/powerpc/kvm/book3s_32_mmu_host.c 		map = find_sid_vsid(vcpu, vsid);
vcpu              176 arch/powerpc/kvm/book3s_32_mmu_host.c 	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
vcpu              200 arch/powerpc/kvm/book3s_32_mmu_host.c 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
vcpu              233 arch/powerpc/kvm/book3s_32_mmu_host.c 	pte = kvmppc_mmu_hpte_cache_next(vcpu);
vcpu              251 arch/powerpc/kvm/book3s_32_mmu_host.c 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
vcpu              258 arch/powerpc/kvm/book3s_32_mmu_host.c void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
vcpu              260 arch/powerpc/kvm/book3s_32_mmu_host.c 	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
vcpu              263 arch/powerpc/kvm/book3s_32_mmu_host.c static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu              266 arch/powerpc/kvm/book3s_32_mmu_host.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              270 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu              276 arch/powerpc/kvm/book3s_32_mmu_host.c 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
vcpu              280 arch/powerpc/kvm/book3s_32_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
vcpu              290 arch/powerpc/kvm/book3s_32_mmu_host.c 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu              291 arch/powerpc/kvm/book3s_32_mmu_host.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              302 arch/powerpc/kvm/book3s_32_mmu_host.c int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
vcpu              308 arch/powerpc/kvm/book3s_32_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              311 arch/powerpc/kvm/book3s_32_mmu_host.c 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
vcpu              318 arch/powerpc/kvm/book3s_32_mmu_host.c 	map = find_sid_vsid(vcpu, gvsid);
vcpu              320 arch/powerpc/kvm/book3s_32_mmu_host.c 		map = create_sid_map(vcpu, gvsid);
vcpu              333 arch/powerpc/kvm/book3s_32_mmu_host.c void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
vcpu              336 arch/powerpc/kvm/book3s_32_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              345 arch/powerpc/kvm/book3s_32_mmu_host.c void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
vcpu              349 arch/powerpc/kvm/book3s_32_mmu_host.c 	kvmppc_mmu_hpte_destroy(vcpu);
vcpu              352 arch/powerpc/kvm/book3s_32_mmu_host.c 		__destroy_context(to_book3s(vcpu)->context_id[i]);
vcpu              359 arch/powerpc/kvm/book3s_32_mmu_host.c int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu              361 arch/powerpc/kvm/book3s_32_mmu_host.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              385 arch/powerpc/kvm/book3s_32_mmu_host.c 	kvmppc_mmu_hpte_init(vcpu);
vcpu              394 arch/powerpc/kvm/book3s_32_mmu_host.c 		__destroy_context(to_book3s(vcpu)->context_id[j]);
vcpu               27 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
vcpu               29 arch/powerpc/kvm/book3s_64_mmu.c 	unsigned long msr = vcpu->arch.intr_msr;
vcpu               30 arch/powerpc/kvm/book3s_64_mmu.c 	unsigned long cur_msr = kvmppc_get_msr(vcpu);
vcpu               38 arch/powerpc/kvm/book3s_64_mmu.c 	kvmppc_set_msr(vcpu, msr);
vcpu               42 arch/powerpc/kvm/book3s_64_mmu.c 				struct kvm_vcpu *vcpu,
vcpu               49 arch/powerpc/kvm/book3s_64_mmu.c 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
vcpu               52 arch/powerpc/kvm/book3s_64_mmu.c 		if (!vcpu->arch.slb[i].valid)
vcpu               55 arch/powerpc/kvm/book3s_64_mmu.c 		if (vcpu->arch.slb[i].tb)
vcpu               58 arch/powerpc/kvm/book3s_64_mmu.c 		if (vcpu->arch.slb[i].esid == cmp_esid)
vcpu               59 arch/powerpc/kvm/book3s_64_mmu.c 			return &vcpu->arch.slb[i];
vcpu               64 arch/powerpc/kvm/book3s_64_mmu.c 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
vcpu               65 arch/powerpc/kvm/book3s_64_mmu.c 	    if (vcpu->arch.slb[i].vsid)
vcpu               67 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].valid ? 'v' : ' ',
vcpu               68 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].large ? 'l' : ' ',
vcpu               69 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].tb    ? 't' : ' ',
vcpu               70 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].esid,
vcpu               71 arch/powerpc/kvm/book3s_64_mmu.c 			vcpu->arch.slb[i].vsid);
vcpu               95 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              100 arch/powerpc/kvm/book3s_64_mmu.c 	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
vcpu              130 arch/powerpc/kvm/book3s_64_mmu.c static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
vcpu              134 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              159 arch/powerpc/kvm/book3s_64_mmu.c 	if (vcpu->arch.papr_enabled)
vcpu              162 arch/powerpc/kvm/book3s_64_mmu.c 		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
vcpu              205 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              221 arch/powerpc/kvm/book3s_64_mmu.c 	ulong mp_ea = vcpu->arch.magic_page_ea;
vcpu              226 arch/powerpc/kvm/book3s_64_mmu.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              228 arch/powerpc/kvm/book3s_64_mmu.c 		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
vcpu              229 arch/powerpc/kvm/book3s_64_mmu.c 		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
vcpu              240 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
vcpu              258 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
vcpu              261 arch/powerpc/kvm/book3s_64_mmu.c 	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
vcpu              271 arch/powerpc/kvm/book3s_64_mmu.c 	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
vcpu              273 arch/powerpc/kvm/book3s_64_mmu.c 	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
vcpu              284 arch/powerpc/kvm/book3s_64_mmu.c 			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
vcpu              309 arch/powerpc/kvm/book3s_64_mmu.c 	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
vcpu              315 arch/powerpc/kvm/book3s_64_mmu.c 	if (unlikely(vcpu->arch.disable_kernel_nx) &&
vcpu              316 arch/powerpc/kvm/book3s_64_mmu.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR))
vcpu              363 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu              370 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu              378 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
vcpu              390 arch/powerpc/kvm/book3s_64_mmu.c 	if (slb_nr > vcpu->arch.slb_nr)
vcpu              393 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
vcpu              407 arch/powerpc/kvm/book3s_64_mmu.c 		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
vcpu              424 arch/powerpc/kvm/book3s_64_mmu.c 	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
vcpu              427 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              430 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
vcpu              440 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
vcpu              444 arch/powerpc/kvm/book3s_64_mmu.c 	if (slb_nr > vcpu->arch.slb_nr)
vcpu              447 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
vcpu              452 arch/powerpc/kvm/book3s_64_mmu.c static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
vcpu              456 arch/powerpc/kvm/book3s_64_mmu.c 	if (slb_nr > vcpu->arch.slb_nr)
vcpu              459 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = &vcpu->arch.slb[slb_nr];
vcpu              464 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
vcpu              471 arch/powerpc/kvm/book3s_64_mmu.c 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
vcpu              483 arch/powerpc/kvm/book3s_64_mmu.c 	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
vcpu              486 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
vcpu              492 arch/powerpc/kvm/book3s_64_mmu.c 	for (i = 1; i < vcpu->arch.slb_nr; i++) {
vcpu              493 arch/powerpc/kvm/book3s_64_mmu.c 		vcpu->arch.slb[i].valid = false;
vcpu              494 arch/powerpc/kvm/book3s_64_mmu.c 		vcpu->arch.slb[i].orige = 0;
vcpu              495 arch/powerpc/kvm/book3s_64_mmu.c 		vcpu->arch.slb[i].origv = 0;
vcpu              498 arch/powerpc/kvm/book3s_64_mmu.c 	if (kvmppc_get_msr(vcpu) & MSR_IR) {
vcpu              499 arch/powerpc/kvm/book3s_64_mmu.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              500 arch/powerpc/kvm/book3s_64_mmu.c 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
vcpu              504 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
vcpu              541 arch/powerpc/kvm/book3s_64_mmu.c 	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
vcpu              544 arch/powerpc/kvm/book3s_64_mmu.c static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
vcpu              559 arch/powerpc/kvm/book3s_64_mmu.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
vcpu              573 arch/powerpc/kvm/book3s_64_mmu.c 	kvm_for_each_vcpu(i, v, vcpu->kvm)
vcpu              578 arch/powerpc/kvm/book3s_64_mmu.c static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
vcpu              580 arch/powerpc/kvm/book3s_64_mmu.c 	ulong mp_ea = vcpu->arch.magic_page_ea;
vcpu              582 arch/powerpc/kvm/book3s_64_mmu.c 	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
vcpu              587 arch/powerpc/kvm/book3s_64_mmu.c static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
vcpu              593 arch/powerpc/kvm/book3s_64_mmu.c 	ulong mp_ea = vcpu->arch.magic_page_ea;
vcpu              595 arch/powerpc/kvm/book3s_64_mmu.c 	u64 msr = kvmppc_get_msr(vcpu);
vcpu              598 arch/powerpc/kvm/book3s_64_mmu.c 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
vcpu              639 arch/powerpc/kvm/book3s_64_mmu.c 	    !segment_contains_magic_page(vcpu, esid))
vcpu              643 arch/powerpc/kvm/book3s_64_mmu.c 	if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu              653 arch/powerpc/kvm/book3s_64_mmu.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              661 arch/powerpc/kvm/book3s_64_mmu.c static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
vcpu              663 arch/powerpc/kvm/book3s_64_mmu.c 	return (to_book3s(vcpu)->hid[5] & 0x80);
vcpu              666 arch/powerpc/kvm/book3s_64_mmu.c void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
vcpu              668 arch/powerpc/kvm/book3s_64_mmu.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
vcpu              685 arch/powerpc/kvm/book3s_64_mmu.c 	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
vcpu               23 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
vcpu               32 arch/powerpc/kvm/book3s_64_mmu_host.c static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu               45 arch/powerpc/kvm/book3s_64_mmu_host.c static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu               50 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu               53 arch/powerpc/kvm/book3s_64_mmu_host.c 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
vcpu               54 arch/powerpc/kvm/book3s_64_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
vcpu               60 arch/powerpc/kvm/book3s_64_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
vcpu               70 arch/powerpc/kvm/book3s_64_mmu_host.c int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
vcpu               86 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvm *kvm = vcpu->kvm;
vcpu               96 arch/powerpc/kvm/book3s_64_mmu_host.c 	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
vcpu              106 arch/powerpc/kvm/book3s_64_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
vcpu              107 arch/powerpc/kvm/book3s_64_mmu_host.c 	map = find_sid_vsid(vcpu, vsid);
vcpu              109 arch/powerpc/kvm/book3s_64_mmu_host.c 		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
vcpu              111 arch/powerpc/kvm/book3s_64_mmu_host.c 		map = find_sid_vsid(vcpu, vsid);
vcpu              127 arch/powerpc/kvm/book3s_64_mmu_host.c 		mark_page_dirty(vcpu->kvm, gfn);
vcpu              149 arch/powerpc/kvm/book3s_64_mmu_host.c 	cpte = kvmppc_mmu_hpte_cache_next(vcpu);
vcpu              198 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
vcpu              212 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
vcpu              217 arch/powerpc/kvm/book3s_64_mmu_host.c 	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
vcpu              220 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
vcpu              223 arch/powerpc/kvm/book3s_64_mmu_host.c static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
vcpu              227 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              231 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu              237 arch/powerpc/kvm/book3s_64_mmu_host.c 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
vcpu              241 arch/powerpc/kvm/book3s_64_mmu_host.c 	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
vcpu              251 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu              252 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              269 arch/powerpc/kvm/book3s_64_mmu_host.c static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
vcpu              271 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              300 arch/powerpc/kvm/book3s_64_mmu_host.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              310 arch/powerpc/kvm/book3s_64_mmu_host.c int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
vcpu              312 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              321 arch/powerpc/kvm/book3s_64_mmu_host.c 	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
vcpu              323 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
vcpu              330 arch/powerpc/kvm/book3s_64_mmu_host.c 	map = find_sid_vsid(vcpu, gvsid);
vcpu              332 arch/powerpc/kvm/book3s_64_mmu_host.c 		map = create_sid_map(vcpu, gvsid);
vcpu              356 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
vcpu              358 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              373 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
vcpu              375 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              381 arch/powerpc/kvm/book3s_64_mmu_host.c void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
vcpu              383 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvmppc_mmu_hpte_destroy(vcpu);
vcpu              384 arch/powerpc/kvm/book3s_64_mmu_host.c 	__destroy_context(to_book3s(vcpu)->context_id[0]);
vcpu              387 arch/powerpc/kvm/book3s_64_mmu_host.c int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu              389 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              402 arch/powerpc/kvm/book3s_64_mmu_host.c 	kvmppc_mmu_hpte_init(vcpu);
vcpu              204 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
vcpu              215 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              278 arch/powerpc/kvm/book3s_64_mmu_hv.c static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
vcpu              280 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long msr = vcpu->arch.intr_msr;
vcpu              283 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
vcpu              286 arch/powerpc/kvm/book3s_64_mmu_hv.c 		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
vcpu              287 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvmppc_set_msr(vcpu, msr);
vcpu              310 arch/powerpc/kvm/book3s_64_mmu_hv.c static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
vcpu              316 arch/powerpc/kvm/book3s_64_mmu_hv.c 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
vcpu              317 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
vcpu              320 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
vcpu              325 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
vcpu              326 arch/powerpc/kvm/book3s_64_mmu_hv.c 			return &vcpu->arch.slb[i];
vcpu              340 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              343 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              350 arch/powerpc/kvm/book3s_64_mmu_hv.c 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
vcpu              352 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(vcpu->kvm))
vcpu              353 arch/powerpc/kvm/book3s_64_mmu_hv.c 		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
vcpu              357 arch/powerpc/kvm/book3s_64_mmu_hv.c 		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
vcpu              363 arch/powerpc/kvm/book3s_64_mmu_hv.c 		slb_v = vcpu->kvm->arch.vrma_slb_v;
vcpu              388 arch/powerpc/kvm/book3s_64_mmu_hv.c 	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
vcpu              398 arch/powerpc/kvm/book3s_64_mmu_hv.c 		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
vcpu              428 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              441 arch/powerpc/kvm/book3s_64_mmu_hv.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              442 arch/powerpc/kvm/book3s_64_mmu_hv.c 		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
vcpu              444 arch/powerpc/kvm/book3s_64_mmu_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              446 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
vcpu              454 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
vcpu              486 arch/powerpc/kvm/book3s_64_mmu_hv.c 	vcpu->arch.paddr_accessed = gpa;
vcpu              487 arch/powerpc/kvm/book3s_64_mmu_hv.c 	vcpu->arch.vaddr_accessed = ea;
vcpu              488 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return kvmppc_emulate_mmio(run, vcpu);
vcpu              491 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              494 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              513 arch/powerpc/kvm/book3s_64_mmu_hv.c 		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
vcpu              521 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (ea != vcpu->arch.pgfault_addr)
vcpu              524 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (vcpu->arch.pgfault_cache) {
vcpu              526 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
vcpu              527 arch/powerpc/kvm/book3s_64_mmu_hv.c 			r = vcpu->arch.pgfault_cache->rpte;
vcpu              528 arch/powerpc/kvm/book3s_64_mmu_hv.c 			psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
vcpu              533 arch/powerpc/kvm/book3s_64_mmu_hv.c 			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
vcpu              537 arch/powerpc/kvm/book3s_64_mmu_hv.c 	index = vcpu->arch.pgfault_index;
vcpu              553 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
vcpu              554 arch/powerpc/kvm/book3s_64_mmu_hv.c 	    hpte[1] != vcpu->arch.pgfault_hpte[1])
vcpu              565 arch/powerpc/kvm/book3s_64_mmu_hv.c 	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
vcpu              569 arch/powerpc/kvm/book3s_64_mmu_hv.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
vcpu              692 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
vcpu              725 arch/powerpc/kvm/book3s_64_mmu_hv.c 	trace_kvm_page_fault_exit(vcpu, hpte, ret);
vcpu             2157 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
vcpu             2159 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
vcpu             2161 arch/powerpc/kvm/book3s_64_mmu_hv.c 	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */
vcpu             2166 arch/powerpc/kvm/book3s_64_mmu_hv.c 	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
vcpu               84 arch/powerpc/kvm/book3s_64_mmu_radix.c static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu               87 arch/powerpc/kvm/book3s_64_mmu_radix.c 	int lpid = vcpu->kvm->arch.lpid;
vcpu               88 arch/powerpc/kvm/book3s_64_mmu_radix.c 	int pid = vcpu->arch.pid;
vcpu               95 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (vcpu->arch.nested)
vcpu               96 arch/powerpc/kvm/book3s_64_mmu_radix.c 		lpid = vcpu->arch.nested->shadow_lpid;
vcpu              107 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
vcpu              112 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
vcpu              120 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
vcpu              123 arch/powerpc/kvm/book3s_64_mmu_radix.c 	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
vcpu              127 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              131 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
vcpu              220 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              224 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
vcpu              246 arch/powerpc/kvm/book3s_64_mmu_radix.c 	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
vcpu              249 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
vcpu              259 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pid = vcpu->arch.pid;
vcpu              268 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
vcpu              269 arch/powerpc/kvm/book3s_64_mmu_radix.c 				vcpu->kvm->arch.process_table, pid, &pte);
vcpu              274 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
vcpu              283 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (vcpu->arch.amr & (1ul << 62))
vcpu              285 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (vcpu->arch.amr & (1ul << 63))
vcpu              287 arch/powerpc/kvm/book3s_64_mmu_radix.c 			if (vcpu->arch.iamr & (1ul << 62))
vcpu              766 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
vcpu              772 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
vcpu              817 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
vcpu              889 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              892 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
vcpu              907 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
vcpu              912 arch/powerpc/kvm/book3s_64_mmu_radix.c 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
vcpu              929 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
vcpu              932 arch/powerpc/kvm/book3s_64_mmu_radix.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
vcpu              938 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
vcpu              959 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
vcpu              540 arch/powerpc/kvm/book3s_64_vio.c long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              552 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              560 arch/powerpc/kvm/book3s_64_vio.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              568 arch/powerpc/kvm/book3s_64_vio.c 	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
vcpu              577 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
vcpu              580 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
vcpu              586 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
vcpu              594 arch/powerpc/kvm/book3s_64_vio.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              600 arch/powerpc/kvm/book3s_64_vio.c long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
vcpu              611 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              630 arch/powerpc/kvm/book3s_64_vio.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              631 arch/powerpc/kvm/book3s_64_vio.c 	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
vcpu              666 arch/powerpc/kvm/book3s_64_vio.c 		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
vcpu              672 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
vcpu              677 arch/powerpc/kvm/book3s_64_vio.c 				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
vcpu              691 arch/powerpc/kvm/book3s_64_vio.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              697 arch/powerpc/kvm/book3s_64_vio.c long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
vcpu              705 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              721 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
vcpu              731 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
vcpu              386 arch/powerpc/kvm/book3s_64_vio_hv.c long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              399 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
vcpu              402 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              415 arch/powerpc/kvm/book3s_64_vio_hv.c 	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
vcpu              422 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
vcpu              425 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
vcpu              431 arch/powerpc/kvm/book3s_64_vio_hv.c 			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
vcpu              441 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
vcpu              456 arch/powerpc/kvm/book3s_64_vio_hv.c 	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
vcpu              477 arch/powerpc/kvm/book3s_64_vio_hv.c long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
vcpu              489 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
vcpu              492 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              511 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
vcpu              519 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
vcpu              522 arch/powerpc/kvm/book3s_64_vio_hv.c 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
vcpu              535 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
vcpu              551 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
vcpu              569 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
vcpu              575 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
vcpu              580 arch/powerpc/kvm/book3s_64_vio_hv.c 				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
vcpu              600 arch/powerpc/kvm/book3s_64_vio_hv.c long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
vcpu              609 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
vcpu              612 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              628 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
vcpu              638 arch/powerpc/kvm/book3s_64_vio_hv.c 			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
vcpu              653 arch/powerpc/kvm/book3s_64_vio_hv.c long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
vcpu              662 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
vcpu              673 arch/powerpc/kvm/book3s_64_vio_hv.c 		vcpu->arch.regs.gpr[4] = 0;
vcpu              678 arch/powerpc/kvm/book3s_64_vio_hv.c 	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
vcpu               74 arch/powerpc/kvm/book3s_emulate.c static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
vcpu               77 arch/powerpc/kvm/book3s_emulate.c 	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
vcpu               81 arch/powerpc/kvm/book3s_emulate.c 	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
vcpu               88 arch/powerpc/kvm/book3s_emulate.c static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu               90 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
vcpu               91 arch/powerpc/kvm/book3s_emulate.c 			sizeof(vcpu->arch.gpr_tm));
vcpu               92 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
vcpu               94 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
vcpu               96 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.ppr_tm = vcpu->arch.ppr;
vcpu               97 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.dscr_tm = vcpu->arch.dscr;
vcpu               98 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.amr_tm = vcpu->arch.amr;
vcpu               99 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu              100 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.tar_tm = vcpu->arch.tar;
vcpu              101 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.lr_tm = vcpu->arch.regs.link;
vcpu              102 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu              103 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu              104 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
vcpu              107 arch/powerpc/kvm/book3s_emulate.c static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu              109 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
vcpu              110 arch/powerpc/kvm/book3s_emulate.c 			sizeof(vcpu->arch.regs.gpr));
vcpu              111 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
vcpu              113 arch/powerpc/kvm/book3s_emulate.c 	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
vcpu              115 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.ppr = vcpu->arch.ppr_tm;
vcpu              116 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.dscr = vcpu->arch.dscr_tm;
vcpu              117 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.amr = vcpu->arch.amr_tm;
vcpu              118 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu              119 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.tar = vcpu->arch.tar_tm;
vcpu              120 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.link = vcpu->arch.lr_tm;
vcpu              121 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu              122 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu              123 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
vcpu              126 arch/powerpc/kvm/book3s_emulate.c static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
vcpu              128 arch/powerpc/kvm/book3s_emulate.c 	unsigned long guest_msr = kvmppc_get_msr(vcpu);
vcpu              133 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
vcpu              140 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_save_tm_pr(vcpu);
vcpu              141 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_copyfrom_vcpu_tm(vcpu);
vcpu              149 arch/powerpc/kvm/book3s_emulate.c 		if (kvmppc_get_msr(vcpu) & MSR_PR)
vcpu              152 arch/powerpc/kvm/book3s_emulate.c 		if (kvmppc_get_msr(vcpu) & MSR_HV)
vcpu              155 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.texasr = texasr;
vcpu              156 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
vcpu              158 arch/powerpc/kvm/book3s_emulate.c 		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
vcpu              165 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_set_msr(vcpu, guest_msr);
vcpu              168 arch/powerpc/kvm/book3s_emulate.c 	if (vcpu->arch.shadow_fscr & FSCR_TAR)
vcpu              169 arch/powerpc/kvm/book3s_emulate.c 		mtspr(SPRN_TAR, vcpu->arch.tar);
vcpu              172 arch/powerpc/kvm/book3s_emulate.c static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
vcpu              174 arch/powerpc/kvm/book3s_emulate.c 	unsigned long guest_msr = kvmppc_get_msr(vcpu);
vcpu              181 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_giveup_ext(vcpu, MSR_VSX);
vcpu              182 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu              183 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_copyto_vcpu_tm(vcpu);
vcpu              184 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_save_tm_sprs(vcpu);
vcpu              191 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_set_msr(vcpu, guest_msr);
vcpu              192 arch/powerpc/kvm/book3s_emulate.c 	kvmppc_restore_tm_pr(vcpu);
vcpu              197 arch/powerpc/kvm/book3s_emulate.c void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
vcpu              203 arch/powerpc/kvm/book3s_emulate.c 	unsigned long guest_msr = kvmppc_get_msr(vcpu);
vcpu              212 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
vcpu              216 arch/powerpc/kvm/book3s_emulate.c 	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
vcpu              223 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
vcpu              225 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.texasr |= TEXASR_PR;
vcpu              228 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.texasr |= TEXASR_HV;
vcpu              230 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
vcpu              238 arch/powerpc/kvm/book3s_emulate.c int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              251 arch/powerpc/kvm/book3s_emulate.c 		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
vcpu              260 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
vcpu              261 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
vcpu              269 arch/powerpc/kvm/book3s_emulate.c 			unsigned long srr1 = kvmppc_get_srr1(vcpu);
vcpu              271 arch/powerpc/kvm/book3s_emulate.c 			unsigned long cur_msr = kvmppc_get_msr(vcpu);
vcpu              285 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
vcpu              286 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_msr(vcpu, srr1);
vcpu              299 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
vcpu              303 arch/powerpc/kvm/book3s_emulate.c 			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
vcpu              305 arch/powerpc/kvm/book3s_emulate.c 				ulong new_msr = kvmppc_get_msr(vcpu);
vcpu              308 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_msr_fast(vcpu, new_msr);
vcpu              310 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_msr(vcpu, rs_val);
vcpu              314 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
vcpu              321 arch/powerpc/kvm/book3s_emulate.c 			if (vcpu->arch.mmu.mfsrin) {
vcpu              323 arch/powerpc/kvm/book3s_emulate.c 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
vcpu              324 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, sr);
vcpu              332 arch/powerpc/kvm/book3s_emulate.c 			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
vcpu              333 arch/powerpc/kvm/book3s_emulate.c 			if (vcpu->arch.mmu.mfsrin) {
vcpu              335 arch/powerpc/kvm/book3s_emulate.c 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
vcpu              336 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, sr);
vcpu              341 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.mtsrin(vcpu,
vcpu              343 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_get_gpr(vcpu, rs));
vcpu              346 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.mtsrin(vcpu,
vcpu              347 arch/powerpc/kvm/book3s_emulate.c 				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
vcpu              348 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_get_gpr(vcpu, rs));
vcpu              354 arch/powerpc/kvm/book3s_emulate.c 			ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu              355 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
vcpu              362 arch/powerpc/kvm/book3s_emulate.c 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
vcpu              365 arch/powerpc/kvm/book3s_emulate.c 		        if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
vcpu              366 arch/powerpc/kvm/book3s_emulate.c 			    !vcpu->arch.papr_enabled) {
vcpu              371 arch/powerpc/kvm/book3s_emulate.c 			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
vcpu              376 arch/powerpc/kvm/book3s_emulate.c 				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
vcpu              381 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.hcall_needed = 1;
vcpu              389 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmte)
vcpu              392 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbmte(vcpu,
vcpu              393 arch/powerpc/kvm/book3s_emulate.c 					kvmppc_get_gpr(vcpu, rs),
vcpu              394 arch/powerpc/kvm/book3s_emulate.c 					kvmppc_get_gpr(vcpu, rb));
vcpu              397 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbie)
vcpu              400 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbie(vcpu,
vcpu              401 arch/powerpc/kvm/book3s_emulate.c 					kvmppc_get_gpr(vcpu, rb));
vcpu              404 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbia)
vcpu              407 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.mmu.slbia(vcpu);
vcpu              410 arch/powerpc/kvm/book3s_emulate.c 			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
vcpu              414 arch/powerpc/kvm/book3s_emulate.c 				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
vcpu              416 arch/powerpc/kvm/book3s_emulate.c 				b = kvmppc_get_gpr(vcpu, rb);
vcpu              417 arch/powerpc/kvm/book3s_emulate.c 				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
vcpu              419 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
vcpu              421 arch/powerpc/kvm/book3s_emulate.c 				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
vcpu              423 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_cr(vcpu, cr);
vcpu              427 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmfee) {
vcpu              432 arch/powerpc/kvm/book3s_emulate.c 				rb_val = kvmppc_get_gpr(vcpu, rb);
vcpu              433 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
vcpu              434 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
vcpu              438 arch/powerpc/kvm/book3s_emulate.c 			if (!vcpu->arch.mmu.slbmfev) {
vcpu              443 arch/powerpc/kvm/book3s_emulate.c 				rb_val = kvmppc_get_gpr(vcpu, rb);
vcpu              444 arch/powerpc/kvm/book3s_emulate.c 				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
vcpu              445 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_gpr(vcpu, rt, t);
vcpu              453 arch/powerpc/kvm/book3s_emulate.c 			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
vcpu              461 arch/powerpc/kvm/book3s_emulate.c 				ra_val = kvmppc_get_gpr(vcpu, ra);
vcpu              464 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
vcpu              468 arch/powerpc/kvm/book3s_emulate.c 			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
vcpu              471 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_dar(vcpu, vaddr);
vcpu              472 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.fault_dar = vaddr;
vcpu              480 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_set_dsisr(vcpu, dsisr);
vcpu              481 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.fault_dsisr = dsisr;
vcpu              483 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_book3s_queue_irqprio(vcpu,
vcpu              495 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              496 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu              501 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              503 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
vcpu              504 arch/powerpc/kvm/book3s_emulate.c 				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
vcpu              506 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
vcpu              511 arch/powerpc/kvm/book3s_emulate.c 					vcpu->arch.texasr |= TEXASR_ROT;
vcpu              513 arch/powerpc/kvm/book3s_emulate.c 				if (kvmppc_get_msr(vcpu) & MSR_HV)
vcpu              514 arch/powerpc/kvm/book3s_emulate.c 					vcpu->arch.texasr |= TEXASR_HV;
vcpu              516 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
vcpu              517 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
vcpu              519 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_restore_tm_sprs(vcpu);
vcpu              527 arch/powerpc/kvm/book3s_emulate.c 			ulong guest_msr = kvmppc_get_msr(vcpu);
vcpu              533 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              534 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu              546 arch/powerpc/kvm/book3s_emulate.c 				ra_val = kvmppc_get_gpr(vcpu, ra);
vcpu              548 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_emulate_tabort(vcpu, ra_val);
vcpu              553 arch/powerpc/kvm/book3s_emulate.c 			ulong guest_msr = kvmppc_get_msr(vcpu);
vcpu              559 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              560 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu              568 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
vcpu              575 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
vcpu              581 arch/powerpc/kvm/book3s_emulate.c 				ra_val = kvmppc_get_gpr(vcpu, ra);
vcpu              582 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_emulate_treclaim(vcpu, ra_val);
vcpu              587 arch/powerpc/kvm/book3s_emulate.c 			ulong guest_msr = kvmppc_get_msr(vcpu);
vcpu              593 arch/powerpc/kvm/book3s_emulate.c 			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              594 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu              602 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
vcpu              614 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
vcpu              619 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_emulate_trchkpt(vcpu);
vcpu              632 arch/powerpc/kvm/book3s_emulate.c 		emulated = kvmppc_emulate_paired_single(run, vcpu);
vcpu              637 arch/powerpc/kvm/book3s_emulate.c void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
vcpu              657 arch/powerpc/kvm/book3s_emulate.c static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
vcpu              659 arch/powerpc/kvm/book3s_emulate.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu              682 arch/powerpc/kvm/book3s_emulate.c int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
vcpu              688 arch/powerpc/kvm/book3s_emulate.c 		if (!spr_allowed(vcpu, PRIV_HYPER))
vcpu              690 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->sdr1 = spr_val;
vcpu              693 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_set_dsisr(vcpu, spr_val);
vcpu              696 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_set_dar(vcpu, spr_val);
vcpu              699 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hior = spr_val;
vcpu              706 arch/powerpc/kvm/book3s_emulate.c 		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
vcpu              708 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
vcpu              711 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu              712 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              716 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[0] = spr_val;
vcpu              719 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[1] = spr_val;
vcpu              722 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[2] = spr_val;
vcpu              725 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[2] = spr_val;
vcpu              727 arch/powerpc/kvm/book3s_emulate.c 		switch (vcpu->arch.pvr) {
vcpu              737 arch/powerpc/kvm/book3s_emulate.c 			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
vcpu              740 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
vcpu              741 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_giveup_ext(vcpu, MSR_FP);
vcpu              743 arch/powerpc/kvm/book3s_emulate.c 				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
vcpu              750 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[4] = spr_val;
vcpu              753 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->hid[5] = spr_val;
vcpu              755 arch/powerpc/kvm/book3s_emulate.c 		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
vcpu              757 arch/powerpc/kvm/book3s_emulate.c 			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
vcpu              767 arch/powerpc/kvm/book3s_emulate.c 		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
vcpu              771 arch/powerpc/kvm/book3s_emulate.c 		kvmppc_set_fscr(vcpu, spr_val);
vcpu              774 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.bescr = spr_val;
vcpu              777 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.ebbhr = spr_val;
vcpu              780 arch/powerpc/kvm/book3s_emulate.c 		vcpu->arch.ebbrr = spr_val;
vcpu              789 arch/powerpc/kvm/book3s_emulate.c 		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              790 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu              795 arch/powerpc/kvm/book3s_emulate.c 		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
vcpu              796 arch/powerpc/kvm/book3s_emulate.c 			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
vcpu              802 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
vcpu              849 arch/powerpc/kvm/book3s_emulate.c 			if (kvmppc_get_msr(vcpu) & MSR_PR) {
vcpu              850 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
vcpu              854 arch/powerpc/kvm/book3s_emulate.c 			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
vcpu              855 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              865 arch/powerpc/kvm/book3s_emulate.c int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
vcpu              875 arch/powerpc/kvm/book3s_emulate.c 		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
vcpu              885 arch/powerpc/kvm/book3s_emulate.c 		if (!spr_allowed(vcpu, PRIV_HYPER))
vcpu              887 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->sdr1;
vcpu              890 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = kvmppc_get_dsisr(vcpu);
vcpu              893 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = kvmppc_get_dar(vcpu);
vcpu              896 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hior;
vcpu              899 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hid[0];
vcpu              902 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hid[1];
vcpu              906 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hid[2];
vcpu              910 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hid[4];
vcpu              913 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->hid[5];
vcpu              923 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.purr;
vcpu              929 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.spurr;
vcpu              932 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->vtb;
vcpu              935 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.ic;
vcpu              945 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
vcpu              949 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.fscr;
vcpu              952 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.bescr;
vcpu              955 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.ebbhr;
vcpu              958 arch/powerpc/kvm/book3s_emulate.c 		*spr_val = vcpu->arch.ebbrr;
vcpu              967 arch/powerpc/kvm/book3s_emulate.c 		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
vcpu              968 arch/powerpc/kvm/book3s_emulate.c 			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
vcpu             1014 arch/powerpc/kvm/book3s_emulate.c 			if (kvmppc_get_msr(vcpu) & MSR_PR) {
vcpu             1015 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
vcpu             1019 arch/powerpc/kvm/book3s_emulate.c 			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
vcpu             1021 arch/powerpc/kvm/book3s_emulate.c 				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu             1032 arch/powerpc/kvm/book3s_emulate.c u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
vcpu             1037 arch/powerpc/kvm/book3s_emulate.c ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
vcpu             1043 arch/powerpc/kvm/book3s_emulate.c 	return vcpu->arch.fault_dar;
vcpu             1055 arch/powerpc/kvm/book3s_emulate.c 			dar = kvmppc_get_gpr(vcpu, ra);
vcpu             1060 arch/powerpc/kvm/book3s_emulate.c 			dar = kvmppc_get_gpr(vcpu, ra);
vcpu             1061 arch/powerpc/kvm/book3s_emulate.c 		dar += kvmppc_get_gpr(vcpu, rb);
vcpu              136 arch/powerpc/kvm/book3s_hv.c static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
vcpu              137 arch/powerpc/kvm/book3s_hv.c static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
vcpu              169 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu              172 arch/powerpc/kvm/book3s_hv.c 		vcpu = READ_ONCE(vc->runnable_threads[i]);
vcpu              173 arch/powerpc/kvm/book3s_hv.c 		if (vcpu) {
vcpu              175 arch/powerpc/kvm/book3s_hv.c 			return vcpu;
vcpu              182 arch/powerpc/kvm/book3s_hv.c #define for_each_runnable_thread(i, vcpu, vc) \
vcpu              183 arch/powerpc/kvm/book3s_hv.c 	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
vcpu              229 arch/powerpc/kvm/book3s_hv.c static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
vcpu              234 arch/powerpc/kvm/book3s_hv.c 	wqp = kvm_arch_vcpu_wq(vcpu);
vcpu              237 arch/powerpc/kvm/book3s_hv.c 		++vcpu->stat.halt_wakeup;
vcpu              240 arch/powerpc/kvm/book3s_hv.c 	cpu = READ_ONCE(vcpu->arch.thread_cpu);
vcpu              245 arch/powerpc/kvm/book3s_hv.c 	cpu = vcpu->cpu;
vcpu              304 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
vcpu              306 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              315 arch/powerpc/kvm/book3s_hv.c 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
vcpu              318 arch/powerpc/kvm/book3s_hv.c 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
vcpu              319 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu              320 arch/powerpc/kvm/book3s_hv.c 	    vcpu->arch.busy_preempt != TB_NIL) {
vcpu              321 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
vcpu              322 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.busy_preempt = TB_NIL;
vcpu              324 arch/powerpc/kvm/book3s_hv.c 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
vcpu              327 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
vcpu              329 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              332 arch/powerpc/kvm/book3s_hv.c 	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
vcpu              335 arch/powerpc/kvm/book3s_hv.c 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
vcpu              336 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu              337 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.busy_preempt = mftb();
vcpu              338 arch/powerpc/kvm/book3s_hv.c 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
vcpu              341 arch/powerpc/kvm/book3s_hv.c static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
vcpu              349 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.msr = msr;
vcpu              350 arch/powerpc/kvm/book3s_hv.c 	kvmppc_end_cede(vcpu);
vcpu              353 arch/powerpc/kvm/book3s_hv.c static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
vcpu              355 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pvr = pvr;
vcpu              361 arch/powerpc/kvm/book3s_hv.c static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
vcpu              364 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              414 arch/powerpc/kvm/book3s_hv.c static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
vcpu              418 arch/powerpc/kvm/book3s_hv.c 	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
vcpu              420 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
vcpu              423 arch/powerpc/kvm/book3s_hv.c 		       r, kvmppc_get_gpr(vcpu, r),
vcpu              424 arch/powerpc/kvm/book3s_hv.c 		       r+16, kvmppc_get_gpr(vcpu, r+16));
vcpu              426 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
vcpu              428 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
vcpu              430 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
vcpu              432 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
vcpu              434 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
vcpu              435 arch/powerpc/kvm/book3s_hv.c 	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
vcpu              437 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
vcpu              438 arch/powerpc/kvm/book3s_hv.c 	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
vcpu              439 arch/powerpc/kvm/book3s_hv.c 	for (r = 0; r < vcpu->arch.slb_max; ++r)
vcpu              441 arch/powerpc/kvm/book3s_hv.c 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
vcpu              443 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
vcpu              444 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.last_inst);
vcpu              452 arch/powerpc/kvm/book3s_hv.c static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
vcpu              458 arch/powerpc/kvm/book3s_hv.c static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
vcpu              464 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&vcpu->arch.vpa_update_lock);
vcpu              470 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu              490 arch/powerpc/kvm/book3s_hv.c static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
vcpu              494 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              607 arch/powerpc/kvm/book3s_hv.c static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
vcpu              609 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              624 arch/powerpc/kvm/book3s_hv.c 		spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu              629 arch/powerpc/kvm/book3s_hv.c 		spin_lock(&vcpu->arch.vpa_update_lock);
vcpu              657 arch/powerpc/kvm/book3s_hv.c static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
vcpu              659 arch/powerpc/kvm/book3s_hv.c 	if (!(vcpu->arch.vpa.update_pending ||
vcpu              660 arch/powerpc/kvm/book3s_hv.c 	      vcpu->arch.slb_shadow.update_pending ||
vcpu              661 arch/powerpc/kvm/book3s_hv.c 	      vcpu->arch.dtl.update_pending))
vcpu              664 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&vcpu->arch.vpa_update_lock);
vcpu              665 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.vpa.update_pending) {
vcpu              666 arch/powerpc/kvm/book3s_hv.c 		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
vcpu              667 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.vpa.pinned_addr)
vcpu              668 arch/powerpc/kvm/book3s_hv.c 			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
vcpu              670 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.dtl.update_pending) {
vcpu              671 arch/powerpc/kvm/book3s_hv.c 		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
vcpu              672 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu              673 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dtl_index = 0;
vcpu              675 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.slb_shadow.update_pending)
vcpu              676 arch/powerpc/kvm/book3s_hv.c 		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
vcpu              677 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu              698 arch/powerpc/kvm/book3s_hv.c static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
vcpu              708 arch/powerpc/kvm/book3s_hv.c 	dt = vcpu->arch.dtl_ptr;
vcpu              709 arch/powerpc/kvm/book3s_hv.c 	vpa = vcpu->arch.vpa.pinned_addr;
vcpu              712 arch/powerpc/kvm/book3s_hv.c 	stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu              713 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.stolen_logged = core_stolen;
vcpu              714 arch/powerpc/kvm/book3s_hv.c 	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
vcpu              715 arch/powerpc/kvm/book3s_hv.c 	stolen += vcpu->arch.busy_stolen;
vcpu              716 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_stolen = 0;
vcpu              717 arch/powerpc/kvm/book3s_hv.c 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
vcpu              722 arch/powerpc/kvm/book3s_hv.c 	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
vcpu              725 arch/powerpc/kvm/book3s_hv.c 	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
vcpu              726 arch/powerpc/kvm/book3s_hv.c 	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
vcpu              728 arch/powerpc/kvm/book3s_hv.c 	if (dt == vcpu->arch.dtl.pinned_end)
vcpu              729 arch/powerpc/kvm/book3s_hv.c 		dt = vcpu->arch.dtl.pinned_addr;
vcpu              730 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.dtl_ptr = dt;
vcpu              733 arch/powerpc/kvm/book3s_hv.c 	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
vcpu              734 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.dtl.dirty = true;
vcpu              738 arch/powerpc/kvm/book3s_hv.c static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
vcpu              743 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.doorbell_request)
vcpu              751 arch/powerpc/kvm/book3s_hv.c 	vc = vcpu->arch.vcore;
vcpu              752 arch/powerpc/kvm/book3s_hv.c 	thr = vcpu->vcpu_id - vc->first_vcpuid;
vcpu              756 arch/powerpc/kvm/book3s_hv.c static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
vcpu              758 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
vcpu              760 arch/powerpc/kvm/book3s_hv.c 	if ((!vcpu->arch.vcore->arch_compat) &&
vcpu              766 arch/powerpc/kvm/book3s_hv.c static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
vcpu              772 arch/powerpc/kvm/book3s_hv.c 		if (!kvmppc_power8_compatible(vcpu))
vcpu              781 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ciabr  = value1;
vcpu              784 arch/powerpc/kvm/book3s_hv.c 		if (!kvmppc_power8_compatible(vcpu))
vcpu              792 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dawr  = value1;
vcpu              793 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dawrx = value2;
vcpu              842 arch/powerpc/kvm/book3s_hv.c static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              860 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
vcpu              864 arch/powerpc/kvm/book3s_hv.c 		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
vcpu              896 arch/powerpc/kvm/book3s_hv.c static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
vcpu              901 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&vcpu->arch.vpa_update_lock);
vcpu              902 arch/powerpc/kvm/book3s_hv.c 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
vcpu              905 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu              909 arch/powerpc/kvm/book3s_hv.c int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
vcpu              911 arch/powerpc/kvm/book3s_hv.c 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
vcpu              918 arch/powerpc/kvm/book3s_hv.c 	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
vcpu              925 arch/powerpc/kvm/book3s_hv.c 		target = kvmppc_get_gpr(vcpu, 4);
vcpu              926 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
vcpu              937 arch/powerpc/kvm/book3s_hv.c 		target = kvmppc_get_gpr(vcpu, 4);
vcpu              940 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
vcpu              945 arch/powerpc/kvm/book3s_hv.c 		yield_count = kvmppc_get_gpr(vcpu, 5);
vcpu              951 arch/powerpc/kvm/book3s_hv.c 		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu              952 arch/powerpc/kvm/book3s_hv.c 					kvmppc_get_gpr(vcpu, 5),
vcpu              953 arch/powerpc/kvm/book3s_hv.c 					kvmppc_get_gpr(vcpu, 6));
vcpu              956 arch/powerpc/kvm/book3s_hv.c 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
vcpu              959 arch/powerpc/kvm/book3s_hv.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              960 arch/powerpc/kvm/book3s_hv.c 		rc = kvmppc_rtas_hcall(vcpu);
vcpu              961 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              971 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_logical_ci_load(vcpu);
vcpu              976 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_logical_ci_store(vcpu);
vcpu              981 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu              982 arch/powerpc/kvm/book3s_hv.c 					kvmppc_get_gpr(vcpu, 5),
vcpu              983 arch/powerpc/kvm/book3s_hv.c 					kvmppc_get_gpr(vcpu, 6),
vcpu              984 arch/powerpc/kvm/book3s_hv.c 					kvmppc_get_gpr(vcpu, 7));
vcpu              994 arch/powerpc/kvm/book3s_hv.c 		if (kvmppc_xics_enabled(vcpu)) {
vcpu              999 arch/powerpc/kvm/book3s_hv.c 			ret = kvmppc_xics_hcall(vcpu, req);
vcpu             1004 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
vcpu             1007 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1008 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 5));
vcpu             1012 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1013 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 5));
vcpu             1018 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1019 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 5),
vcpu             1020 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 6));
vcpu             1025 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1026 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 5),
vcpu             1027 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 6),
vcpu             1028 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 7));
vcpu             1033 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1034 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 5),
vcpu             1035 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 6),
vcpu             1036 arch/powerpc/kvm/book3s_hv.c 						kvmppc_get_gpr(vcpu, 7));
vcpu             1042 arch/powerpc/kvm/book3s_hv.c 		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
vcpu             1048 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
vcpu             1049 arch/powerpc/kvm/book3s_hv.c 			ret = kvmhv_set_partition_table(vcpu);
vcpu             1053 arch/powerpc/kvm/book3s_hv.c 		if (!nesting_enabled(vcpu->kvm))
vcpu             1055 arch/powerpc/kvm/book3s_hv.c 		ret = kvmhv_enter_nested_guest(vcpu);
vcpu             1057 arch/powerpc/kvm/book3s_hv.c 			kvmppc_set_gpr(vcpu, 3, 0);
vcpu             1058 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.hcall_needed = 0;
vcpu             1061 arch/powerpc/kvm/book3s_hv.c 			kvmppc_set_gpr(vcpu, 3, 0);
vcpu             1062 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.hcall_needed = 0;
vcpu             1068 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
vcpu             1069 arch/powerpc/kvm/book3s_hv.c 			ret = kvmhv_do_nested_tlbie(vcpu);
vcpu             1073 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
vcpu             1074 arch/powerpc/kvm/book3s_hv.c 			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
vcpu             1077 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1078 arch/powerpc/kvm/book3s_hv.c 					 kvmppc_get_gpr(vcpu, 5),
vcpu             1079 arch/powerpc/kvm/book3s_hv.c 					 kvmppc_get_gpr(vcpu, 6));
vcpu             1084 arch/powerpc/kvm/book3s_hv.c 	kvmppc_set_gpr(vcpu, 3, ret);
vcpu             1085 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.hcall_needed = 0;
vcpu             1095 arch/powerpc/kvm/book3s_hv.c static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
vcpu             1097 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.msr |= MSR_EE;
vcpu             1098 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 1;
vcpu             1100 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.prodded) {
vcpu             1101 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.prodded = 0;
vcpu             1103 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ceded = 0;
vcpu             1134 arch/powerpc/kvm/book3s_hv.c 					struct kvm_vcpu *vcpu)
vcpu             1138 arch/powerpc/kvm/book3s_hv.c 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
vcpu             1149 arch/powerpc/kvm/book3s_hv.c 		run->debug.arch.address = kvmppc_get_pc(vcpu);
vcpu             1152 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu             1161 arch/powerpc/kvm/book3s_hv.c static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
vcpu             1167 arch/powerpc/kvm/book3s_hv.c 	nthreads = vcpu->kvm->arch.emul_smt_mode;
vcpu             1169 arch/powerpc/kvm/book3s_hv.c 	cpu = vcpu->vcpu_id & ~(nthreads - 1);
vcpu             1171 arch/powerpc/kvm/book3s_hv.c 		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
vcpu             1194 arch/powerpc/kvm/book3s_hv.c static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
vcpu             1198 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1201 arch/powerpc/kvm/book3s_hv.c 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
vcpu             1206 arch/powerpc/kvm/book3s_hv.c 	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
vcpu             1209 arch/powerpc/kvm/book3s_hv.c 		arg = kvmppc_get_gpr(vcpu, rb);
vcpu             1215 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
vcpu             1224 arch/powerpc/kvm/book3s_hv.c 		arg = kvmppc_get_gpr(vcpu, rb);
vcpu             1227 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vcore->dpdes = 0;
vcpu             1228 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.doorbell_request = 0;
vcpu             1236 arch/powerpc/kvm/book3s_hv.c 			arg = kvmppc_read_dpdes(vcpu);
vcpu             1241 arch/powerpc/kvm/book3s_hv.c 		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
vcpu             1246 arch/powerpc/kvm/book3s_hv.c 	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
vcpu             1250 arch/powerpc/kvm/book3s_hv.c static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1255 arch/powerpc/kvm/book3s_hv.c 	vcpu->stat.sum_exits++;
vcpu             1265 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.shregs.msr & MSR_HV) {
vcpu             1268 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu             1269 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.shregs.msr);
vcpu             1270 arch/powerpc/kvm/book3s_hv.c 		kvmppc_dump_regs(vcpu);
vcpu             1272 arch/powerpc/kvm/book3s_hv.c 		run->hw.hardware_exit_reason = vcpu->arch.trap;
vcpu             1277 arch/powerpc/kvm/book3s_hv.c 	switch (vcpu->arch.trap) {
vcpu             1280 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.dec_exits++;
vcpu             1286 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.ext_intr_exits++;
vcpu             1297 arch/powerpc/kvm/book3s_hv.c 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
vcpu             1305 arch/powerpc/kvm/book3s_hv.c 		if (!vcpu->kvm->arch.fwnmi_enabled) {
vcpu             1306 arch/powerpc/kvm/book3s_hv.c 			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
vcpu             1307 arch/powerpc/kvm/book3s_hv.c 			kvmppc_core_queue_machine_check(vcpu, flags);
vcpu             1314 arch/powerpc/kvm/book3s_hv.c 		run->hw.hardware_exit_reason = vcpu->arch.trap;
vcpu             1318 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
vcpu             1334 arch/powerpc/kvm/book3s_hv.c 		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
vcpu             1335 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_queue_program(vcpu, flags);
vcpu             1348 arch/powerpc/kvm/book3s_hv.c 		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
vcpu             1350 arch/powerpc/kvm/book3s_hv.c 			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
vcpu             1352 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.hcall_needed = 1;
vcpu             1367 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu             1368 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
vcpu             1370 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu             1371 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
vcpu             1382 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
vcpu             1383 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
vcpu             1384 arch/powerpc/kvm/book3s_hv.c 				swab32(vcpu->arch.emul_inst) :
vcpu             1385 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.emul_inst;
vcpu             1386 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
vcpu             1387 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_emulate_debug_inst(run, vcpu);
vcpu             1389 arch/powerpc/kvm/book3s_hv.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu             1402 arch/powerpc/kvm/book3s_hv.c 		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
vcpu             1404 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_emulate_doorbell_instr(vcpu);
vcpu             1406 arch/powerpc/kvm/book3s_hv.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu             1419 arch/powerpc/kvm/book3s_hv.c 		r = kvmhv_p9_tm_emulation(vcpu);
vcpu             1427 arch/powerpc/kvm/book3s_hv.c 		kvmppc_dump_regs(vcpu);
vcpu             1429 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu             1430 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.shregs.msr);
vcpu             1431 arch/powerpc/kvm/book3s_hv.c 		run->hw.hardware_exit_reason = vcpu->arch.trap;
vcpu             1439 arch/powerpc/kvm/book3s_hv.c static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             1444 arch/powerpc/kvm/book3s_hv.c 	vcpu->stat.sum_exits++;
vcpu             1454 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.shregs.msr & MSR_HV) {
vcpu             1457 arch/powerpc/kvm/book3s_hv.c 			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
vcpu             1458 arch/powerpc/kvm/book3s_hv.c 			 vcpu->arch.shregs.msr);
vcpu             1459 arch/powerpc/kvm/book3s_hv.c 		kvmppc_dump_regs(vcpu);
vcpu             1462 arch/powerpc/kvm/book3s_hv.c 	switch (vcpu->arch.trap) {
vcpu             1465 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.dec_exits++;
vcpu             1469 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.ext_intr_exits++;
vcpu             1474 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.ext_intr_exits++;
vcpu             1487 arch/powerpc/kvm/book3s_hv.c 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
vcpu             1496 arch/powerpc/kvm/book3s_hv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1497 arch/powerpc/kvm/book3s_hv.c 		r = kvmhv_nested_page_fault(run, vcpu);
vcpu             1498 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu             1501 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu             1502 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
vcpu             1504 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu             1505 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
vcpu             1506 arch/powerpc/kvm/book3s_hv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1507 arch/powerpc/kvm/book3s_hv.c 		r = kvmhv_nested_page_fault(run, vcpu);
vcpu             1508 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu             1519 arch/powerpc/kvm/book3s_hv.c 		r = kvmhv_p9_tm_emulation(vcpu);
vcpu             1524 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.trap = 0;
vcpu             1527 arch/powerpc/kvm/book3s_hv.c 			kvmppc_xics_rm_complete(vcpu, 0);
vcpu             1537 arch/powerpc/kvm/book3s_hv.c static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
vcpu             1543 arch/powerpc/kvm/book3s_hv.c 	sregs->pvr = vcpu->arch.pvr;
vcpu             1544 arch/powerpc/kvm/book3s_hv.c 	for (i = 0; i < vcpu->arch.slb_max; i++) {
vcpu             1545 arch/powerpc/kvm/book3s_hv.c 		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
vcpu             1546 arch/powerpc/kvm/book3s_hv.c 		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
vcpu             1552 arch/powerpc/kvm/book3s_hv.c static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
vcpu             1558 arch/powerpc/kvm/book3s_hv.c 	if (sregs->pvr != vcpu->arch.pvr)
vcpu             1562 arch/powerpc/kvm/book3s_hv.c 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
vcpu             1564 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
vcpu             1565 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
vcpu             1569 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.slb_max = j;
vcpu             1574 arch/powerpc/kvm/book3s_hv.c static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
vcpu             1577 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1578 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu             1587 arch/powerpc/kvm/book3s_hv.c 		struct kvm_vcpu *vcpu;
vcpu             1590 arch/powerpc/kvm/book3s_hv.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1591 arch/powerpc/kvm/book3s_hv.c 			if (vcpu->arch.vcore != vc)
vcpu             1594 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.intr_msr |= MSR_LE;
vcpu             1596 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.intr_msr &= ~MSR_LE;
vcpu             1622 arch/powerpc/kvm/book3s_hv.c static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu             1636 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dabr);
vcpu             1639 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dabrx);
vcpu             1642 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dscr);
vcpu             1645 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.purr);
vcpu             1648 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.spurr);
vcpu             1651 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.amr);
vcpu             1654 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.uamor);
vcpu             1658 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
vcpu             1662 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
vcpu             1666 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.spmc[i]);
vcpu             1669 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.siar);
vcpu             1672 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.sdar);
vcpu             1675 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.sier);
vcpu             1678 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.iamr);
vcpu             1681 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.pspb);
vcpu             1690 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
vcpu             1691 arch/powerpc/kvm/book3s_hv.c 				   vcpu->arch.doorbell_request);
vcpu             1694 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
vcpu             1697 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dawr);
vcpu             1700 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dawrx);
vcpu             1703 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.ciabr);
vcpu             1706 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.csigr);
vcpu             1709 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tacr);
vcpu             1712 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tcscr);
vcpu             1715 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.pid);
vcpu             1718 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.acop);
vcpu             1721 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.wort);
vcpu             1724 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tid);
vcpu             1727 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.psscr);
vcpu             1730 arch/powerpc/kvm/book3s_hv.c 		spin_lock(&vcpu->arch.vpa_update_lock);
vcpu             1731 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
vcpu             1732 arch/powerpc/kvm/book3s_hv.c 		spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu             1735 arch/powerpc/kvm/book3s_hv.c 		spin_lock(&vcpu->arch.vpa_update_lock);
vcpu             1736 arch/powerpc/kvm/book3s_hv.c 		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
vcpu             1737 arch/powerpc/kvm/book3s_hv.c 		val->vpaval.length = vcpu->arch.slb_shadow.len;
vcpu             1738 arch/powerpc/kvm/book3s_hv.c 		spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu             1741 arch/powerpc/kvm/book3s_hv.c 		spin_lock(&vcpu->arch.vpa_update_lock);
vcpu             1742 arch/powerpc/kvm/book3s_hv.c 		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
vcpu             1743 arch/powerpc/kvm/book3s_hv.c 		val->vpaval.length = vcpu->arch.dtl.len;
vcpu             1744 arch/powerpc/kvm/book3s_hv.c 		spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu             1747 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
vcpu             1751 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
vcpu             1754 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.ppr);
vcpu             1758 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tfhar);
vcpu             1761 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tfiar);
vcpu             1764 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.texasr);
vcpu             1768 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
vcpu             1776 arch/powerpc/kvm/book3s_hv.c 				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
vcpu             1779 arch/powerpc/kvm/book3s_hv.c 				val->vval = vcpu->arch.vr_tm.vr[i-32];
vcpu             1786 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.cr_tm);
vcpu             1789 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.xer_tm);
vcpu             1792 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.lr_tm);
vcpu             1795 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.ctr_tm);
vcpu             1798 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
vcpu             1801 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.amr_tm);
vcpu             1804 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.ppr_tm);
vcpu             1807 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
vcpu             1811 arch/powerpc/kvm/book3s_hv.c 			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
vcpu             1816 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dscr_tm);
vcpu             1819 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.tar_tm);
vcpu             1823 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
vcpu             1826 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.dec_expires +
vcpu             1827 arch/powerpc/kvm/book3s_hv.c 				   vcpu->arch.vcore->tb_offset);
vcpu             1830 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.online);
vcpu             1833 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
vcpu             1843 arch/powerpc/kvm/book3s_hv.c static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu             1857 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dabr = set_reg_val(id, *val);
vcpu             1860 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
vcpu             1863 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dscr = set_reg_val(id, *val);
vcpu             1866 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.purr = set_reg_val(id, *val);
vcpu             1869 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.spurr = set_reg_val(id, *val);
vcpu             1872 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.amr = set_reg_val(id, *val);
vcpu             1875 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.uamor = set_reg_val(id, *val);
vcpu             1879 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
vcpu             1883 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
vcpu             1887 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.spmc[i] = set_reg_val(id, *val);
vcpu             1890 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.siar = set_reg_val(id, *val);
vcpu             1893 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.sdar = set_reg_val(id, *val);
vcpu             1896 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.sier = set_reg_val(id, *val);
vcpu             1899 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.iamr = set_reg_val(id, *val);
vcpu             1902 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.pspb = set_reg_val(id, *val);
vcpu             1905 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
vcpu             1908 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
vcpu             1911 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dawr = set_reg_val(id, *val);
vcpu             1914 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
vcpu             1917 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ciabr = set_reg_val(id, *val);
vcpu             1919 arch/powerpc/kvm/book3s_hv.c 		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
vcpu             1920 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
vcpu             1923 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.csigr = set_reg_val(id, *val);
vcpu             1926 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tacr = set_reg_val(id, *val);
vcpu             1929 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tcscr = set_reg_val(id, *val);
vcpu             1932 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.pid = set_reg_val(id, *val);
vcpu             1935 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.acop = set_reg_val(id, *val);
vcpu             1938 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.wort = set_reg_val(id, *val);
vcpu             1941 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tid = set_reg_val(id, *val);
vcpu             1944 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
vcpu             1949 arch/powerpc/kvm/book3s_hv.c 		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
vcpu             1950 arch/powerpc/kvm/book3s_hv.c 			      vcpu->arch.dtl.next_gpa))
vcpu             1952 arch/powerpc/kvm/book3s_hv.c 		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
vcpu             1958 arch/powerpc/kvm/book3s_hv.c 		if (addr && !vcpu->arch.vpa.next_gpa)
vcpu             1960 arch/powerpc/kvm/book3s_hv.c 		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
vcpu             1967 arch/powerpc/kvm/book3s_hv.c 			     !vcpu->arch.vpa.next_gpa))
vcpu             1970 arch/powerpc/kvm/book3s_hv.c 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
vcpu             1974 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vcore->tb_offset =
vcpu             1978 arch/powerpc/kvm/book3s_hv.c 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
vcpu             1981 arch/powerpc/kvm/book3s_hv.c 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
vcpu             1984 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ppr = set_reg_val(id, *val);
vcpu             1988 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tfhar = set_reg_val(id, *val);
vcpu             1991 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tfiar = set_reg_val(id, *val);
vcpu             1994 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.texasr = set_reg_val(id, *val);
vcpu             1998 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
vcpu             2006 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
vcpu             2009 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.vr_tm.vr[i-32] = val->vval;
vcpu             2015 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.cr_tm = set_reg_val(id, *val);
vcpu             2018 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.xer_tm = set_reg_val(id, *val);
vcpu             2021 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.lr_tm = set_reg_val(id, *val);
vcpu             2024 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ctr_tm = set_reg_val(id, *val);
vcpu             2027 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
vcpu             2030 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.amr_tm = set_reg_val(id, *val);
vcpu             2033 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ppr_tm = set_reg_val(id, *val);
vcpu             2036 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
vcpu             2040 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
vcpu             2045 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dscr_tm = set_reg_val(id, *val);
vcpu             2048 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.tar_tm = set_reg_val(id, *val);
vcpu             2052 arch/powerpc/kvm/book3s_hv.c 		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
vcpu             2055 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.dec_expires = set_reg_val(id, *val) -
vcpu             2056 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.vcore->tb_offset;
vcpu             2060 arch/powerpc/kvm/book3s_hv.c 		if (i && !vcpu->arch.online)
vcpu             2061 arch/powerpc/kvm/book3s_hv.c 			atomic_inc(&vcpu->arch.vcore->online_count);
vcpu             2062 arch/powerpc/kvm/book3s_hv.c 		else if (!i && vcpu->arch.online)
vcpu             2063 arch/powerpc/kvm/book3s_hv.c 			atomic_dec(&vcpu->arch.vcore->online_count);
vcpu             2064 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.online = i;
vcpu             2067 arch/powerpc/kvm/book3s_hv.c 		vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
vcpu             2127 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu	*vcpu;
vcpu             2134 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu = inode->i_private;
vcpu             2141 arch/powerpc/kvm/book3s_hv.c 	kvm_get_kvm(vcpu->kvm);
vcpu             2142 arch/powerpc/kvm/book3s_hv.c 	p->vcpu = vcpu;
vcpu             2152 arch/powerpc/kvm/book3s_hv.c 	kvm_put_kvm(p->vcpu->kvm);
vcpu             2161 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu = p->vcpu;
vcpu             2177 arch/powerpc/kvm/book3s_hv.c 				((unsigned long)vcpu + timings[i].offset);
vcpu             2238 arch/powerpc/kvm/book3s_hv.c static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
vcpu             2241 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             2246 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
vcpu             2247 arch/powerpc/kvm/book3s_hv.c 	if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
vcpu             2249 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.debugfs_timings =
vcpu             2250 arch/powerpc/kvm/book3s_hv.c 		debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
vcpu             2251 arch/powerpc/kvm/book3s_hv.c 				    vcpu, &debugfs_timings_ops);
vcpu             2255 arch/powerpc/kvm/book3s_hv.c static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
vcpu             2263 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             2269 arch/powerpc/kvm/book3s_hv.c 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu             2270 arch/powerpc/kvm/book3s_hv.c 	if (!vcpu)
vcpu             2273 arch/powerpc/kvm/book3s_hv.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu             2277 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shared = &vcpu->arch.shregs;
vcpu             2284 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shared_big_endian = true;
vcpu             2286 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shared_big_endian = false;
vcpu             2289 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.mmcr[0] = MMCR0_FC;
vcpu             2290 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ctrl = CTRL_RUNLATCH;
vcpu             2292 arch/powerpc/kvm/book3s_hv.c 	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
vcpu             2293 arch/powerpc/kvm/book3s_hv.c 	spin_lock_init(&vcpu->arch.vpa_update_lock);
vcpu             2294 arch/powerpc/kvm/book3s_hv.c 	spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu             2295 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_preempt = TB_NIL;
vcpu             2296 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
vcpu             2305 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
vcpu             2308 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
vcpu             2310 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.hfscr |= HFSCR_TM;
vcpu             2313 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.hfscr |= HFSCR_TM;
vcpu             2315 arch/powerpc/kvm/book3s_hv.c 	kvmppc_mmu_book3s_hv_init(vcpu);
vcpu             2317 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
vcpu             2319 arch/powerpc/kvm/book3s_hv.c 	init_waitqueue_head(&vcpu->arch.cpu_run);
vcpu             2362 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.vcore = vcore;
vcpu             2363 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
vcpu             2364 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.thread_cpu = -1;
vcpu             2365 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.prev_cpu = -1;
vcpu             2367 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.cpu_type = KVM_CPU_3S_64;
vcpu             2368 arch/powerpc/kvm/book3s_hv.c 	kvmppc_sanity_check(vcpu);
vcpu             2370 arch/powerpc/kvm/book3s_hv.c 	debugfs_vcpu_init(vcpu, id);
vcpu             2372 arch/powerpc/kvm/book3s_hv.c 	return vcpu;
vcpu             2375 arch/powerpc/kvm/book3s_hv.c 	kvm_vcpu_uninit(vcpu);
vcpu             2377 arch/powerpc/kvm/book3s_hv.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             2426 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
vcpu             2428 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&vcpu->arch.vpa_update_lock);
vcpu             2429 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
vcpu             2430 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
vcpu             2431 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
vcpu             2432 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu             2433 arch/powerpc/kvm/book3s_hv.c 	kvm_vcpu_uninit(vcpu);
vcpu             2434 arch/powerpc/kvm/book3s_hv.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             2437 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
vcpu             2443 arch/powerpc/kvm/book3s_hv.c static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
vcpu             2448 arch/powerpc/kvm/book3s_hv.c 	if (now > vcpu->arch.dec_expires) {
vcpu             2450 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_queue_dec(vcpu);
vcpu             2451 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_prepare_to_enter(vcpu);
vcpu             2454 arch/powerpc/kvm/book3s_hv.c 	dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
vcpu             2455 arch/powerpc/kvm/book3s_hv.c 	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
vcpu             2456 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.timer_running = 1;
vcpu             2459 arch/powerpc/kvm/book3s_hv.c static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
vcpu             2461 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 0;
vcpu             2462 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.timer_running) {
vcpu             2463 arch/powerpc/kvm/book3s_hv.c 		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
vcpu             2464 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.timer_running = 0;
vcpu             2471 arch/powerpc/kvm/book3s_hv.c 				   struct kvm_vcpu *vcpu)
vcpu             2475 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
vcpu             2477 arch/powerpc/kvm/book3s_hv.c 	spin_lock_irq(&vcpu->arch.tbacct_lock);
vcpu             2479 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
vcpu             2480 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.stolen_logged;
vcpu             2481 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_preempt = now;
vcpu             2482 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
vcpu             2483 arch/powerpc/kvm/book3s_hv.c 	spin_unlock_irq(&vcpu->arch.tbacct_lock);
vcpu             2485 arch/powerpc/kvm/book3s_hv.c 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
vcpu             2533 arch/powerpc/kvm/book3s_hv.c static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
vcpu             2535 arch/powerpc/kvm/book3s_hv.c 	struct kvm_nested_guest *nested = vcpu->arch.nested;
vcpu             2558 arch/powerpc/kvm/book3s_hv.c static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
vcpu             2560 arch/powerpc/kvm/book3s_hv.c 	struct kvm_nested_guest *nested = vcpu->arch.nested;
vcpu             2561 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             2568 arch/powerpc/kvm/book3s_hv.c 		prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
vcpu             2570 arch/powerpc/kvm/book3s_hv.c 		prev_cpu = vcpu->arch.prev_cpu;
vcpu             2588 arch/powerpc/kvm/book3s_hv.c 			radix_flush_cpu(kvm, prev_cpu, vcpu);
vcpu             2590 arch/powerpc/kvm/book3s_hv.c 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
vcpu             2592 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.prev_cpu = pcpu;
vcpu             2596 arch/powerpc/kvm/book3s_hv.c static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
vcpu             2603 arch/powerpc/kvm/book3s_hv.c 	if (vcpu) {
vcpu             2604 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.timer_running) {
vcpu             2605 arch/powerpc/kvm/book3s_hv.c 			hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
vcpu             2606 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.timer_running = 0;
vcpu             2608 arch/powerpc/kvm/book3s_hv.c 		cpu += vcpu->arch.ptid;
vcpu             2609 arch/powerpc/kvm/book3s_hv.c 		vcpu->cpu = vc->pcpu;
vcpu             2610 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.thread_cpu = cpu;
vcpu             2614 arch/powerpc/kvm/book3s_hv.c 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
vcpu             2849 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             2851 arch/powerpc/kvm/book3s_hv.c 	for_each_runnable_thread(i, vcpu, vc) {
vcpu             2852 arch/powerpc/kvm/book3s_hv.c 		if (signal_pending(vcpu->arch.run_task))
vcpu             2853 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.ret = -EINTR;
vcpu             2854 arch/powerpc/kvm/book3s_hv.c 		else if (vcpu->arch.vpa.update_pending ||
vcpu             2855 arch/powerpc/kvm/book3s_hv.c 			 vcpu->arch.slb_shadow.update_pending ||
vcpu             2856 arch/powerpc/kvm/book3s_hv.c 			 vcpu->arch.dtl.update_pending)
vcpu             2857 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.ret = RESUME_GUEST;
vcpu             2860 arch/powerpc/kvm/book3s_hv.c 		kvmppc_remove_runnable(vc, vcpu);
vcpu             2861 arch/powerpc/kvm/book3s_hv.c 		wake_up(&vcpu->arch.cpu_run);
vcpu             2899 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             2906 arch/powerpc/kvm/book3s_hv.c 		for_each_runnable_thread(i, vcpu, vc)
vcpu             2907 arch/powerpc/kvm/book3s_hv.c 			if (signal_pending(vcpu->arch.run_task))
vcpu             2918 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             2922 arch/powerpc/kvm/book3s_hv.c 	for_each_runnable_thread(i, vcpu, vc) {
vcpu             2932 arch/powerpc/kvm/book3s_hv.c 		if (now < vcpu->arch.dec_expires &&
vcpu             2933 arch/powerpc/kvm/book3s_hv.c 		    kvmppc_core_pending_dec(vcpu))
vcpu             2934 arch/powerpc/kvm/book3s_hv.c 			kvmppc_core_dequeue_dec(vcpu);
vcpu             2936 arch/powerpc/kvm/book3s_hv.c 		trace_kvm_guest_exit(vcpu);
vcpu             2939 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.trap)
vcpu             2940 arch/powerpc/kvm/book3s_hv.c 			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
vcpu             2941 arch/powerpc/kvm/book3s_hv.c 						    vcpu->arch.run_task);
vcpu             2943 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ret = ret;
vcpu             2944 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.trap = 0;
vcpu             2947 arch/powerpc/kvm/book3s_hv.c 		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
vcpu             2948 arch/powerpc/kvm/book3s_hv.c 			if (vcpu->arch.pending_exceptions)
vcpu             2949 arch/powerpc/kvm/book3s_hv.c 				kvmppc_core_prepare_to_enter(vcpu);
vcpu             2950 arch/powerpc/kvm/book3s_hv.c 			if (vcpu->arch.ceded)
vcpu             2951 arch/powerpc/kvm/book3s_hv.c 				kvmppc_set_timer(vcpu);
vcpu             2955 arch/powerpc/kvm/book3s_hv.c 			kvmppc_remove_runnable(vc, vcpu);
vcpu             2956 arch/powerpc/kvm/book3s_hv.c 			wake_up(&vcpu->arch.cpu_run);
vcpu             2971 arch/powerpc/kvm/book3s_hv.c 			vcpu = next_runnable_thread(vc, &i);
vcpu             2972 arch/powerpc/kvm/book3s_hv.c 			wake_up(&vcpu->arch.cpu_run);
vcpu             3044 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             3097 arch/powerpc/kvm/book3s_hv.c 		for_each_runnable_thread(i, vcpu, vc) {
vcpu             3098 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.ret = -EBUSY;
vcpu             3099 arch/powerpc/kvm/book3s_hv.c 			kvmppc_remove_runnable(vc, vcpu);
vcpu             3100 arch/powerpc/kvm/book3s_hv.c 			wake_up(&vcpu->arch.cpu_run);
vcpu             3125 arch/powerpc/kvm/book3s_hv.c 			for_each_runnable_thread(i, vcpu, core_info.vc[sub])
vcpu             3126 arch/powerpc/kvm/book3s_hv.c 				kvmppc_prepare_radix_vcpu(vcpu, pcpu);
vcpu             3250 arch/powerpc/kvm/book3s_hv.c 		for_each_runnable_thread(i, vcpu, pvc) {
vcpu             3251 arch/powerpc/kvm/book3s_hv.c 			kvmppc_start_thread(vcpu, pvc);
vcpu             3252 arch/powerpc/kvm/book3s_hv.c 			kvmppc_create_dtl_entry(vcpu, pvc);
vcpu             3253 arch/powerpc/kvm/book3s_hv.c 			trace_kvm_guest_enter(vcpu);
vcpu             3254 arch/powerpc/kvm/book3s_hv.c 			if (!vcpu->arch.ptid)
vcpu             3256 arch/powerpc/kvm/book3s_hv.c 			active |= 1 << (thr + vcpu->arch.ptid);
vcpu             3389 arch/powerpc/kvm/book3s_hv.c static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu             3392 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu             3424 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_PURR, vcpu->arch.purr);
vcpu             3425 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
vcpu             3428 arch/powerpc/kvm/book3s_hv.c 		mtspr(SPRN_DAWR, vcpu->arch.dawr);
vcpu             3429 arch/powerpc/kvm/book3s_hv.c 		mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
vcpu             3431 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
vcpu             3432 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_IC, vcpu->arch.ic);
vcpu             3433 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_PID, vcpu->arch.pid);
vcpu             3435 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
vcpu             3438 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
vcpu             3440 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
vcpu             3441 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
vcpu             3442 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
vcpu             3443 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
vcpu             3450 arch/powerpc/kvm/book3s_hv.c 	kvmppc_xive_push_vcpu(vcpu);
vcpu             3452 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
vcpu             3453 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
vcpu             3455 arch/powerpc/kvm/book3s_hv.c 	trap = __kvmhv_vcpu_entry_p9(vcpu);
vcpu             3461 arch/powerpc/kvm/book3s_hv.c 	      purr - vcpu->arch.purr);
vcpu             3463 arch/powerpc/kvm/book3s_hv.c 	      spurr - vcpu->arch.spurr);
vcpu             3464 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.purr = purr;
vcpu             3465 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.spurr = spurr;
vcpu             3467 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ic = mfspr(SPRN_IC);
vcpu             3468 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pid = mfspr(SPRN_PID);
vcpu             3469 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
vcpu             3471 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
vcpu             3472 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
vcpu             3473 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
vcpu             3474 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
vcpu             3491 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
vcpu             3510 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
vcpu             3519 arch/powerpc/kvm/book3s_hv.c int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu             3522 arch/powerpc/kvm/book3s_hv.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu             3539 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 0;
vcpu             3548 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.vpa.pinned_addr) {
vcpu             3549 arch/powerpc/kvm/book3s_hv.c 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
vcpu             3552 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vpa.dirty = 1;
vcpu             3557 arch/powerpc/kvm/book3s_hv.c 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
vcpu             3559 arch/powerpc/kvm/book3s_hv.c 	kvmhv_load_guest_pmu(vcpu);
vcpu             3562 arch/powerpc/kvm/book3s_hv.c 	load_fp_state(&vcpu->arch.fp);
vcpu             3564 arch/powerpc/kvm/book3s_hv.c 	load_vr_state(&vcpu->arch.vr);
vcpu             3566 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
vcpu             3568 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_DSCR, vcpu->arch.dscr);
vcpu             3569 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_IAMR, vcpu->arch.iamr);
vcpu             3570 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_PSPB, vcpu->arch.pspb);
vcpu             3571 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_FSCR, vcpu->arch.fscr);
vcpu             3572 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_TAR, vcpu->arch.tar);
vcpu             3573 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
vcpu             3574 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
vcpu             3575 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_BESCR, vcpu->arch.bescr);
vcpu             3576 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_WORT, vcpu->arch.wort);
vcpu             3577 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_TIDR, vcpu->arch.tid);
vcpu             3578 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
vcpu             3579 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
vcpu             3580 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_AMR, vcpu->arch.amr);
vcpu             3581 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
vcpu             3583 arch/powerpc/kvm/book3s_hv.c 	if (!(vcpu->arch.ctrl & 1))
vcpu             3586 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
vcpu             3600 arch/powerpc/kvm/book3s_hv.c 		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
vcpu             3601 arch/powerpc/kvm/book3s_hv.c 		kvmhv_save_hv_regs(vcpu, &hvregs);
vcpu             3603 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
vcpu             3605 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.nested) {
vcpu             3606 arch/powerpc/kvm/book3s_hv.c 			hvregs.lpid = vcpu->arch.nested->shadow_lpid;
vcpu             3607 arch/powerpc/kvm/book3s_hv.c 			hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
vcpu             3609 arch/powerpc/kvm/book3s_hv.c 			hvregs.lpid = vcpu->kvm->arch.lpid;
vcpu             3610 arch/powerpc/kvm/book3s_hv.c 			hvregs.vcpu_token = vcpu->vcpu_id;
vcpu             3614 arch/powerpc/kvm/book3s_hv.c 					  __pa(&vcpu->arch.regs));
vcpu             3615 arch/powerpc/kvm/book3s_hv.c 		kvmhv_restore_hv_return_state(vcpu, &hvregs);
vcpu             3616 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
vcpu             3617 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
vcpu             3618 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
vcpu             3619 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
vcpu             3623 arch/powerpc/kvm/book3s_hv.c 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
vcpu             3624 arch/powerpc/kvm/book3s_hv.c 		    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
vcpu             3625 arch/powerpc/kvm/book3s_hv.c 			kvmppc_nested_cede(vcpu);
vcpu             3626 arch/powerpc/kvm/book3s_hv.c 			kvmppc_set_gpr(vcpu, 3, 0);
vcpu             3630 arch/powerpc/kvm/book3s_hv.c 		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
vcpu             3633 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.slb_max = 0;
vcpu             3638 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.dec_expires = dec + tb;
vcpu             3639 arch/powerpc/kvm/book3s_hv.c 	vcpu->cpu = -1;
vcpu             3640 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.thread_cpu = -1;
vcpu             3641 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
vcpu             3643 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.iamr = mfspr(SPRN_IAMR);
vcpu             3644 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pspb = mfspr(SPRN_PSPB);
vcpu             3645 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.fscr = mfspr(SPRN_FSCR);
vcpu             3646 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.tar = mfspr(SPRN_TAR);
vcpu             3647 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
vcpu             3648 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
vcpu             3649 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.bescr = mfspr(SPRN_BESCR);
vcpu             3650 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.wort = mfspr(SPRN_WORT);
vcpu             3651 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.tid = mfspr(SPRN_TIDR);
vcpu             3652 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.amr = mfspr(SPRN_AMR);
vcpu             3653 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
vcpu             3654 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.dscr = mfspr(SPRN_DSCR);
vcpu             3664 arch/powerpc/kvm/book3s_hv.c 	if (host_amr != vcpu->arch.amr)
vcpu             3668 arch/powerpc/kvm/book3s_hv.c 	store_fp_state(&vcpu->arch.fp);
vcpu             3670 arch/powerpc/kvm/book3s_hv.c 	store_vr_state(&vcpu->arch.vr);
vcpu             3672 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
vcpu             3676 arch/powerpc/kvm/book3s_hv.c 		kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
vcpu             3679 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.vpa.pinned_addr) {
vcpu             3680 arch/powerpc/kvm/book3s_hv.c 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
vcpu             3683 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.vpa.dirty = 1;
vcpu             3687 arch/powerpc/kvm/book3s_hv.c 	save_pmu |= nesting_enabled(vcpu->kvm);
vcpu             3689 arch/powerpc/kvm/book3s_hv.c 	kvmhv_save_guest_pmu(vcpu, save_pmu);
vcpu             3709 arch/powerpc/kvm/book3s_hv.c 				 struct kvm_vcpu *vcpu, int wait_state)
vcpu             3713 arch/powerpc/kvm/book3s_hv.c 	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
vcpu             3714 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
vcpu             3719 arch/powerpc/kvm/book3s_hv.c 	finish_wait(&vcpu->arch.cpu_run, &wait);
vcpu             3741 arch/powerpc/kvm/book3s_hv.c static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
vcpu             3745 arch/powerpc/kvm/book3s_hv.c 	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
vcpu             3746 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.xive_saved_state.cppr;
vcpu             3749 arch/powerpc/kvm/book3s_hv.c static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
vcpu             3755 arch/powerpc/kvm/book3s_hv.c static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
vcpu             3757 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
vcpu             3758 arch/powerpc/kvm/book3s_hv.c 	    kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
vcpu             3770 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             3773 arch/powerpc/kvm/book3s_hv.c 	for_each_runnable_thread(i, vcpu, vc) {
vcpu             3774 arch/powerpc/kvm/book3s_hv.c 		if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
vcpu             3887 arch/powerpc/kvm/book3s_hv.c static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
vcpu             3890 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             3895 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_hv_setup_htab_rma(vcpu);
vcpu             3906 arch/powerpc/kvm/book3s_hv.c static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu             3912 arch/powerpc/kvm/book3s_hv.c 	trace_kvmppc_run_vcpu_enter(vcpu);
vcpu             3915 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ret = RESUME_GUEST;
vcpu             3916 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.trap = 0;
vcpu             3917 arch/powerpc/kvm/book3s_hv.c 	kvmppc_update_vpas(vcpu);
vcpu             3922 arch/powerpc/kvm/book3s_hv.c 	vc = vcpu->arch.vcore;
vcpu             3924 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 0;
vcpu             3925 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.run_task = current;
vcpu             3926 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.kvm_run = kvm_run;
vcpu             3927 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu             3928 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu             3929 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_preempt = TB_NIL;
vcpu             3930 arch/powerpc/kvm/book3s_hv.c 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
vcpu             3942 arch/powerpc/kvm/book3s_hv.c 			kvmppc_create_dtl_entry(vcpu, vc);
vcpu             3943 arch/powerpc/kvm/book3s_hv.c 			kvmppc_start_thread(vcpu, vc);
vcpu             3944 arch/powerpc/kvm/book3s_hv.c 			trace_kvm_guest_enter(vcpu);
vcpu             3951 arch/powerpc/kvm/book3s_hv.c 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
vcpu             3954 arch/powerpc/kvm/book3s_hv.c 		if (!vcpu->kvm->arch.mmu_ready) {
vcpu             3956 arch/powerpc/kvm/book3s_hv.c 			r = kvmhv_setup_mmu(vcpu);
vcpu             3962 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.ret = r;
vcpu             3971 arch/powerpc/kvm/book3s_hv.c 			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
vcpu             3984 arch/powerpc/kvm/book3s_hv.c 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
vcpu             3993 arch/powerpc/kvm/book3s_hv.c 		vc->runner = vcpu;
vcpu             4008 arch/powerpc/kvm/book3s_hv.c 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
vcpu             4012 arch/powerpc/kvm/book3s_hv.c 		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
vcpu             4017 arch/powerpc/kvm/book3s_hv.c 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
vcpu             4018 arch/powerpc/kvm/book3s_hv.c 		kvmppc_remove_runnable(vc, vcpu);
vcpu             4019 arch/powerpc/kvm/book3s_hv.c 		vcpu->stat.signal_exits++;
vcpu             4021 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ret = -EINTR;
vcpu             4031 arch/powerpc/kvm/book3s_hv.c 	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
vcpu             4033 arch/powerpc/kvm/book3s_hv.c 	return vcpu->arch.ret;
vcpu             4037 arch/powerpc/kvm/book3s_hv.c 			  struct kvm_vcpu *vcpu, u64 time_limit,
vcpu             4043 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             4044 arch/powerpc/kvm/book3s_hv.c 	struct kvm_nested_guest *nested = vcpu->arch.nested;
vcpu             4046 arch/powerpc/kvm/book3s_hv.c 	trace_kvmppc_run_vcpu_enter(vcpu);
vcpu             4049 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ret = RESUME_GUEST;
vcpu             4050 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.trap = 0;
vcpu             4052 arch/powerpc/kvm/book3s_hv.c 	vc = vcpu->arch.vcore;
vcpu             4053 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 0;
vcpu             4054 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.run_task = current;
vcpu             4055 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.kvm_run = kvm_run;
vcpu             4056 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu             4057 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu             4058 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.busy_preempt = TB_NIL;
vcpu             4059 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
vcpu             4060 arch/powerpc/kvm/book3s_hv.c 	vc->runnable_threads[0] = vcpu;
vcpu             4062 arch/powerpc/kvm/book3s_hv.c 	vc->runner = vcpu;
vcpu             4066 arch/powerpc/kvm/book3s_hv.c 		kvmhv_setup_mmu(vcpu);
vcpu             4071 arch/powerpc/kvm/book3s_hv.c 	kvmppc_update_vpas(vcpu);
vcpu             4079 arch/powerpc/kvm/book3s_hv.c 	kvmppc_prepare_radix_vcpu(vcpu, pcpu);
vcpu             4089 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_prepare_to_enter(vcpu);
vcpu             4090 arch/powerpc/kvm/book3s_hv.c 		if (vcpu->arch.doorbell_request) {
vcpu             4093 arch/powerpc/kvm/book3s_hv.c 			vcpu->arch.doorbell_request = 0;
vcpu             4096 arch/powerpc/kvm/book3s_hv.c 			     &vcpu->arch.pending_exceptions))
vcpu             4098 arch/powerpc/kvm/book3s_hv.c 	} else if (vcpu->arch.pending_exceptions ||
vcpu             4099 arch/powerpc/kvm/book3s_hv.c 		   vcpu->arch.doorbell_request ||
vcpu             4100 arch/powerpc/kvm/book3s_hv.c 		   xive_interrupt_pending(vcpu)) {
vcpu             4101 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.ret = RESUME_HOST;
vcpu             4110 arch/powerpc/kvm/book3s_hv.c 	kvmppc_start_thread(vcpu, vc);
vcpu             4111 arch/powerpc/kvm/book3s_hv.c 	kvmppc_create_dtl_entry(vcpu, vc);
vcpu             4112 arch/powerpc/kvm/book3s_hv.c 	trace_kvm_guest_enter(vcpu);
vcpu             4133 arch/powerpc/kvm/book3s_hv.c 	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
vcpu             4134 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.trap = trap;
vcpu             4163 arch/powerpc/kvm/book3s_hv.c 	if (kvmppc_core_pending_dec(vcpu) &&
vcpu             4164 arch/powerpc/kvm/book3s_hv.c 			((get_tb() < vcpu->arch.dec_expires) ||
vcpu             4166 arch/powerpc/kvm/book3s_hv.c 			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
vcpu             4167 arch/powerpc/kvm/book3s_hv.c 		kvmppc_core_dequeue_dec(vcpu);
vcpu             4169 arch/powerpc/kvm/book3s_hv.c 	trace_kvm_guest_exit(vcpu);
vcpu             4173 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
vcpu             4175 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_handle_nested_exit(kvm_run, vcpu);
vcpu             4177 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ret = r;
vcpu             4179 arch/powerpc/kvm/book3s_hv.c 	if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
vcpu             4180 arch/powerpc/kvm/book3s_hv.c 	    !kvmppc_vcpu_woken(vcpu)) {
vcpu             4181 arch/powerpc/kvm/book3s_hv.c 		kvmppc_set_timer(vcpu);
vcpu             4182 arch/powerpc/kvm/book3s_hv.c 		while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
vcpu             4184 arch/powerpc/kvm/book3s_hv.c 				vcpu->stat.signal_exits++;
vcpu             4186 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.ret = -EINTR;
vcpu             4194 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ceded = 0;
vcpu             4200 arch/powerpc/kvm/book3s_hv.c 	kvmppc_remove_runnable(vc, vcpu);
vcpu             4201 arch/powerpc/kvm/book3s_hv.c 	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
vcpu             4203 arch/powerpc/kvm/book3s_hv.c 	return vcpu->arch.ret;
vcpu             4206 arch/powerpc/kvm/book3s_hv.c 	vcpu->stat.signal_exits++;
vcpu             4208 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.ret = -EINTR;
vcpu             4215 arch/powerpc/kvm/book3s_hv.c static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             4224 arch/powerpc/kvm/book3s_hv.c 	if (!vcpu->arch.sane) {
vcpu             4256 arch/powerpc/kvm/book3s_hv.c 	if (!vcpu->arch.online) {
vcpu             4257 arch/powerpc/kvm/book3s_hv.c 		atomic_inc(&vcpu->arch.vcore->online_count);
vcpu             4258 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.online = 1;
vcpu             4261 arch/powerpc/kvm/book3s_hv.c 	kvmppc_core_prepare_to_enter(vcpu);
vcpu             4269 arch/powerpc/kvm/book3s_hv.c 	kvm = vcpu->kvm;
vcpu             4285 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu             4286 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.pgdir = current->mm->pgd;
vcpu             4287 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
vcpu             4300 arch/powerpc/kvm/book3s_hv.c 			r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
vcpu             4301 arch/powerpc/kvm/book3s_hv.c 						  vcpu->arch.vcore->lpcr);
vcpu             4303 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_run_vcpu(run, vcpu);
vcpu             4306 arch/powerpc/kvm/book3s_hv.c 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
vcpu             4307 arch/powerpc/kvm/book3s_hv.c 			trace_kvm_hcall_enter(vcpu);
vcpu             4308 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_pseries_do_hcall(vcpu);
vcpu             4309 arch/powerpc/kvm/book3s_hv.c 			trace_kvm_hcall_exit(vcpu, r);
vcpu             4310 arch/powerpc/kvm/book3s_hv.c 			kvmppc_core_prepare_to_enter(vcpu);
vcpu             4313 arch/powerpc/kvm/book3s_hv.c 			r = kvmppc_book3s_hv_page_fault(run, vcpu,
vcpu             4314 arch/powerpc/kvm/book3s_hv.c 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
vcpu             4320 arch/powerpc/kvm/book3s_hv.c 				r = kvmppc_xics_rm_complete(vcpu, 0);
vcpu             4334 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
vcpu             4400 arch/powerpc/kvm/book3s_hv.c 	struct kvm_vcpu *vcpu;
vcpu             4441 arch/powerpc/kvm/book3s_hv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             4442 arch/powerpc/kvm/book3s_hv.c 		spin_lock(&vcpu->arch.vpa_update_lock);
vcpu             4443 arch/powerpc/kvm/book3s_hv.c 		kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
vcpu             4444 arch/powerpc/kvm/book3s_hv.c 		kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
vcpu             4445 arch/powerpc/kvm/book3s_hv.c 		spin_unlock(&vcpu->arch.vpa_update_lock);
vcpu             4546 arch/powerpc/kvm/book3s_hv.c static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
vcpu             4576 arch/powerpc/kvm/book3s_hv.c static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
vcpu             4579 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             4640 arch/powerpc/kvm/book3s_hv.c 	kvmppc_map_vrma(vcpu, memslot, porder);
vcpu             4967 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             4973 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
vcpu             4979 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
vcpu             5362 arch/powerpc/kvm/book3s_hv.c static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
vcpu             5367 arch/powerpc/kvm/book3s_hv.c 	if (kvmhv_vcpu_is_radix(vcpu)) {
vcpu             5368 arch/powerpc/kvm/book3s_hv.c 		rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
vcpu             5375 arch/powerpc/kvm/book3s_hv.c 	if (rc && vcpu->arch.nested)
vcpu             5381 arch/powerpc/kvm/book3s_hv.c static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
vcpu             5386 arch/powerpc/kvm/book3s_hv.c 	if (kvmhv_vcpu_is_radix(vcpu)) {
vcpu             5387 arch/powerpc/kvm/book3s_hv.c 		rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
vcpu             5394 arch/powerpc/kvm/book3s_hv.c 	if (rc && vcpu->arch.nested)
vcpu               39 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
vcpu               40 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
vcpu               41 arch/powerpc/kvm/book3s_hv_builtin.c int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
vcpu               43 arch/powerpc/kvm/book3s_hv_builtin.c int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
vcpu               44 arch/powerpc/kvm/book3s_hv_builtin.c int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
vcpu              132 arch/powerpc/kvm/book3s_hv_builtin.c long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
vcpu              206 arch/powerpc/kvm/book3s_hv_builtin.c long kvmppc_h_random(struct kvm_vcpu *vcpu)
vcpu              211 arch/powerpc/kvm/book3s_hv_builtin.c 	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
vcpu              212 arch/powerpc/kvm/book3s_hv_builtin.c 		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
vcpu              214 arch/powerpc/kvm/book3s_hv_builtin.c 		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
vcpu              397 arch/powerpc/kvm/book3s_hv_builtin.c 	struct kvm_vcpu *vcpu;
vcpu              399 arch/powerpc/kvm/book3s_hv_builtin.c 	vcpu = local_paca->kvm_hstate.kvm_vcpu;
vcpu              400 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!vcpu)
vcpu              402 arch/powerpc/kvm/book3s_hv_builtin.c 	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
vcpu              412 arch/powerpc/kvm/book3s_hv_builtin.c 	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
vcpu              573 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
vcpu              575 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              579 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_xirr(vcpu);
vcpu              582 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_xirr(vcpu);
vcpu              584 arch/powerpc/kvm/book3s_hv_builtin.c 		return xics_rm_h_xirr(vcpu);
vcpu              587 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
vcpu              589 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              591 arch/powerpc/kvm/book3s_hv_builtin.c 	vcpu->arch.regs.gpr[5] = get_tb();
vcpu              594 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_xirr(vcpu);
vcpu              597 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_xirr(vcpu);
vcpu              599 arch/powerpc/kvm/book3s_hv_builtin.c 		return xics_rm_h_xirr(vcpu);
vcpu              602 arch/powerpc/kvm/book3s_hv_builtin.c unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
vcpu              604 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              608 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_ipoll(vcpu, server);
vcpu              611 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_ipoll(vcpu, server);
vcpu              616 arch/powerpc/kvm/book3s_hv_builtin.c int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              619 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              623 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_ipi(vcpu, server, mfrr);
vcpu              626 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_ipi(vcpu, server, mfrr);
vcpu              628 arch/powerpc/kvm/book3s_hv_builtin.c 		return xics_rm_h_ipi(vcpu, server, mfrr);
vcpu              631 arch/powerpc/kvm/book3s_hv_builtin.c int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
vcpu              633 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              637 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_cppr(vcpu, cppr);
vcpu              640 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_cppr(vcpu, cppr);
vcpu              642 arch/powerpc/kvm/book3s_hv_builtin.c 		return xics_rm_h_cppr(vcpu, cppr);
vcpu              645 arch/powerpc/kvm/book3s_hv_builtin.c int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
vcpu              647 arch/powerpc/kvm/book3s_hv_builtin.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu              651 arch/powerpc/kvm/book3s_hv_builtin.c 			return xive_rm_h_eoi(vcpu, xirr);
vcpu              654 arch/powerpc/kvm/book3s_hv_builtin.c 		return __xive_vm_h_eoi(vcpu, xirr);
vcpu              656 arch/powerpc/kvm/book3s_hv_builtin.c 		return xics_rm_h_eoi(vcpu, xirr);
vcpu              762 arch/powerpc/kvm/book3s_hv_builtin.c void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
vcpu              769 arch/powerpc/kvm/book3s_hv_builtin.c 	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
vcpu              775 arch/powerpc/kvm/book3s_hv_builtin.c 	if (vcpu->arch.shregs.msr & MSR_EE) {
vcpu              787 arch/powerpc/kvm/book3s_hv_builtin.c 		unsigned long msr, old_msr = vcpu->arch.shregs.msr;
vcpu              789 arch/powerpc/kvm/book3s_hv_builtin.c 		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
vcpu              790 arch/powerpc/kvm/book3s_hv_builtin.c 		kvmppc_set_srr1(vcpu, old_msr);
vcpu              791 arch/powerpc/kvm/book3s_hv_builtin.c 		kvmppc_set_pc(vcpu, vec);
vcpu              792 arch/powerpc/kvm/book3s_hv_builtin.c 		msr = vcpu->arch.intr_msr;
vcpu              795 arch/powerpc/kvm/book3s_hv_builtin.c 		vcpu->arch.shregs.msr = msr;
vcpu              798 arch/powerpc/kvm/book3s_hv_builtin.c 	if (vcpu->arch.doorbell_request) {
vcpu              800 arch/powerpc/kvm/book3s_hv_builtin.c 		vcpu->arch.vcore->dpdes = 1;
vcpu              802 arch/powerpc/kvm/book3s_hv_builtin.c 		vcpu->arch.doorbell_request = 0;
vcpu               28 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vcpu               30 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu               34 arch/powerpc/kvm/book3s_hv_nested.c 	hr->hfscr = vcpu->arch.hfscr;
vcpu               36 arch/powerpc/kvm/book3s_hv_nested.c 	hr->dawr0 = vcpu->arch.dawr;
vcpu               37 arch/powerpc/kvm/book3s_hv_nested.c 	hr->dawrx0 = vcpu->arch.dawrx;
vcpu               38 arch/powerpc/kvm/book3s_hv_nested.c 	hr->ciabr = vcpu->arch.ciabr;
vcpu               39 arch/powerpc/kvm/book3s_hv_nested.c 	hr->purr = vcpu->arch.purr;
vcpu               40 arch/powerpc/kvm/book3s_hv_nested.c 	hr->spurr = vcpu->arch.spurr;
vcpu               41 arch/powerpc/kvm/book3s_hv_nested.c 	hr->ic = vcpu->arch.ic;
vcpu               43 arch/powerpc/kvm/book3s_hv_nested.c 	hr->srr0 = vcpu->arch.shregs.srr0;
vcpu               44 arch/powerpc/kvm/book3s_hv_nested.c 	hr->srr1 = vcpu->arch.shregs.srr1;
vcpu               45 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[0] = vcpu->arch.shregs.sprg0;
vcpu               46 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[1] = vcpu->arch.shregs.sprg1;
vcpu               47 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[2] = vcpu->arch.shregs.sprg2;
vcpu               48 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[3] = vcpu->arch.shregs.sprg3;
vcpu               49 arch/powerpc/kvm/book3s_hv_nested.c 	hr->pidr = vcpu->arch.pid;
vcpu               50 arch/powerpc/kvm/book3s_hv_nested.c 	hr->cfar = vcpu->arch.cfar;
vcpu               51 arch/powerpc/kvm/book3s_hv_nested.c 	hr->ppr = vcpu->arch.ppr;
vcpu               96 arch/powerpc/kvm/book3s_hv_nested.c static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
vcpu               99 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              102 arch/powerpc/kvm/book3s_hv_nested.c 	hr->hfscr = vcpu->arch.hfscr;
vcpu              103 arch/powerpc/kvm/book3s_hv_nested.c 	hr->purr = vcpu->arch.purr;
vcpu              104 arch/powerpc/kvm/book3s_hv_nested.c 	hr->spurr = vcpu->arch.spurr;
vcpu              105 arch/powerpc/kvm/book3s_hv_nested.c 	hr->ic = vcpu->arch.ic;
vcpu              107 arch/powerpc/kvm/book3s_hv_nested.c 	hr->srr0 = vcpu->arch.shregs.srr0;
vcpu              108 arch/powerpc/kvm/book3s_hv_nested.c 	hr->srr1 = vcpu->arch.shregs.srr1;
vcpu              109 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[0] = vcpu->arch.shregs.sprg0;
vcpu              110 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[1] = vcpu->arch.shregs.sprg1;
vcpu              111 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[2] = vcpu->arch.shregs.sprg2;
vcpu              112 arch/powerpc/kvm/book3s_hv_nested.c 	hr->sprg[3] = vcpu->arch.shregs.sprg3;
vcpu              113 arch/powerpc/kvm/book3s_hv_nested.c 	hr->pidr = vcpu->arch.pid;
vcpu              114 arch/powerpc/kvm/book3s_hv_nested.c 	hr->cfar = vcpu->arch.cfar;
vcpu              115 arch/powerpc/kvm/book3s_hv_nested.c 	hr->ppr = vcpu->arch.ppr;
vcpu              118 arch/powerpc/kvm/book3s_hv_nested.c 		hr->hdar = vcpu->arch.fault_dar;
vcpu              119 arch/powerpc/kvm/book3s_hv_nested.c 		hr->hdsisr = vcpu->arch.fault_dsisr;
vcpu              120 arch/powerpc/kvm/book3s_hv_nested.c 		hr->asdr = vcpu->arch.fault_gpa;
vcpu              123 arch/powerpc/kvm/book3s_hv_nested.c 		hr->asdr = vcpu->arch.fault_gpa;
vcpu              126 arch/powerpc/kvm/book3s_hv_nested.c 		hr->heir = vcpu->arch.emul_inst;
vcpu              131 arch/powerpc/kvm/book3s_hv_nested.c static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vcpu              137 arch/powerpc/kvm/book3s_hv_nested.c 	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
vcpu              147 arch/powerpc/kvm/book3s_hv_nested.c static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vcpu              149 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              153 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.hfscr = hr->hfscr;
vcpu              154 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.dawr = hr->dawr0;
vcpu              155 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.dawrx = hr->dawrx0;
vcpu              156 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ciabr = hr->ciabr;
vcpu              157 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.purr = hr->purr;
vcpu              158 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.spurr = hr->spurr;
vcpu              159 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ic = hr->ic;
vcpu              161 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.srr0 = hr->srr0;
vcpu              162 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.srr1 = hr->srr1;
vcpu              163 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg0 = hr->sprg[0];
vcpu              164 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg1 = hr->sprg[1];
vcpu              165 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg2 = hr->sprg[2];
vcpu              166 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg3 = hr->sprg[3];
vcpu              167 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.pid = hr->pidr;
vcpu              168 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.cfar = hr->cfar;
vcpu              169 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ppr = hr->ppr;
vcpu              172 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
vcpu              175 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              178 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.hfscr = hr->hfscr;
vcpu              179 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.purr = hr->purr;
vcpu              180 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.spurr = hr->spurr;
vcpu              181 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ic = hr->ic;
vcpu              183 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.fault_dar = hr->hdar;
vcpu              184 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.fault_dsisr = hr->hdsisr;
vcpu              185 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.fault_gpa = hr->asdr;
vcpu              186 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.emul_inst = hr->heir;
vcpu              187 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.srr0 = hr->srr0;
vcpu              188 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.srr1 = hr->srr1;
vcpu              189 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg0 = hr->sprg[0];
vcpu              190 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg1 = hr->sprg[1];
vcpu              191 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg2 = hr->sprg[2];
vcpu              192 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.sprg3 = hr->sprg[3];
vcpu              193 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.pid = hr->pidr;
vcpu              194 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.cfar = hr->cfar;
vcpu              195 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ppr = hr->ppr;
vcpu              198 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
vcpu              201 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.trap = 0;
vcpu              209 arch/powerpc/kvm/book3s_hv_nested.c 	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
vcpu              210 arch/powerpc/kvm/book3s_hv_nested.c 	    && (vcpu->mmio_is_write == 0)) {
vcpu              211 arch/powerpc/kvm/book3s_hv_nested.c 		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
vcpu              213 arch/powerpc/kvm/book3s_hv_nested.c 						    gpr[vcpu->arch.io_gpr]);
vcpu              214 arch/powerpc/kvm/book3s_hv_nested.c 		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
vcpu              218 arch/powerpc/kvm/book3s_hv_nested.c long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
vcpu              224 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
vcpu              231 arch/powerpc/kvm/book3s_hv_nested.c 	if (vcpu->kvm->arch.l1_ptcr == 0)
vcpu              235 arch/powerpc/kvm/book3s_hv_nested.c 	hv_ptr = kvmppc_get_gpr(vcpu, 4);
vcpu              236 arch/powerpc/kvm/book3s_hv_nested.c 	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
vcpu              240 arch/powerpc/kvm/book3s_hv_nested.c 	if (kvmppc_need_byteswap(vcpu))
vcpu              245 arch/powerpc/kvm/book3s_hv_nested.c 	regs_ptr = kvmppc_get_gpr(vcpu, 5);
vcpu              246 arch/powerpc/kvm/book3s_hv_nested.c 	err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
vcpu              250 arch/powerpc/kvm/book3s_hv_nested.c 	if (kvmppc_need_byteswap(vcpu))
vcpu              256 arch/powerpc/kvm/book3s_hv_nested.c 	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
vcpu              266 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
vcpu              267 arch/powerpc/kvm/book3s_hv_nested.c 	saved_l1_regs = vcpu->arch.regs;
vcpu              268 arch/powerpc/kvm/book3s_hv_nested.c 	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
vcpu              275 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.nested = l2;
vcpu              276 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
vcpu              277 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.regs = l2_regs;
vcpu              278 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
vcpu              282 arch/powerpc/kvm/book3s_hv_nested.c 	sanitise_hv_regs(vcpu, &l2_hv);
vcpu              283 arch/powerpc/kvm/book3s_hv_nested.c 	restore_hv_regs(vcpu, &l2_hv);
vcpu              285 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ret = RESUME_GUEST;
vcpu              286 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.trap = 0;
vcpu              289 arch/powerpc/kvm/book3s_hv_nested.c 			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
vcpu              293 arch/powerpc/kvm/book3s_hv_nested.c 		r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
vcpu              298 arch/powerpc/kvm/book3s_hv_nested.c 	l2_regs = vcpu->arch.regs;
vcpu              299 arch/powerpc/kvm/book3s_hv_nested.c 	l2_regs.msr = vcpu->arch.shregs.msr;
vcpu              300 arch/powerpc/kvm/book3s_hv_nested.c 	delta_purr = vcpu->arch.purr - l2_hv.purr;
vcpu              301 arch/powerpc/kvm/book3s_hv_nested.c 	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
vcpu              302 arch/powerpc/kvm/book3s_hv_nested.c 	delta_ic = vcpu->arch.ic - l2_hv.ic;
vcpu              304 arch/powerpc/kvm/book3s_hv_nested.c 	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
vcpu              307 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.nested = NULL;
vcpu              308 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.regs = saved_l1_regs;
vcpu              309 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
vcpu              312 arch/powerpc/kvm/book3s_hv_nested.c 		vcpu->arch.shregs.msr |= MSR_TS_S;
vcpu              314 arch/powerpc/kvm/book3s_hv_nested.c 	restore_hv_regs(vcpu, &saved_l1_hv);
vcpu              315 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.purr += delta_purr;
vcpu              316 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.spurr += delta_spurr;
vcpu              317 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.ic += delta_ic;
vcpu              323 arch/powerpc/kvm/book3s_hv_nested.c 	if (kvmppc_need_byteswap(vcpu)) {
vcpu              327 arch/powerpc/kvm/book3s_hv_nested.c 	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
vcpu              331 arch/powerpc/kvm/book3s_hv_nested.c 	err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
vcpu              339 arch/powerpc/kvm/book3s_hv_nested.c 	if (vcpu->mmio_needed) {
vcpu              340 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
vcpu              344 arch/powerpc/kvm/book3s_hv_nested.c 	return vcpu->arch.trap;
vcpu              443 arch/powerpc/kvm/book3s_hv_nested.c long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
vcpu              445 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu              446 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
vcpu              456 arch/powerpc/kvm/book3s_hv_nested.c 	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
vcpu              473 arch/powerpc/kvm/book3s_hv_nested.c long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
vcpu              476 arch/powerpc/kvm/book3s_hv_nested.c 	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
vcpu              477 arch/powerpc/kvm/book3s_hv_nested.c 	int pid = kvmppc_get_gpr(vcpu, 5);
vcpu              478 arch/powerpc/kvm/book3s_hv_nested.c 	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
vcpu              479 arch/powerpc/kvm/book3s_hv_nested.c 	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
vcpu              480 arch/powerpc/kvm/book3s_hv_nested.c 	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
vcpu              482 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long n = kvmppc_get_gpr(vcpu, 9);
vcpu              496 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
vcpu              512 arch/powerpc/kvm/book3s_hv_nested.c 		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
vcpu              517 arch/powerpc/kvm/book3s_hv_nested.c 		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
vcpu              914 arch/powerpc/kvm/book3s_hv_nested.c static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
vcpu              918 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu              973 arch/powerpc/kvm/book3s_hv_nested.c static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
vcpu              976 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu              998 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
vcpu             1009 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
vcpu             1012 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1040 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
vcpu             1042 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1051 arch/powerpc/kvm/book3s_hv_nested.c 			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
vcpu             1058 arch/powerpc/kvm/book3s_hv_nested.c static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
vcpu             1061 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1094 arch/powerpc/kvm/book3s_hv_nested.c 		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
vcpu             1100 arch/powerpc/kvm/book3s_hv_nested.c 			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
vcpu             1106 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
vcpu             1121 arch/powerpc/kvm/book3s_hv_nested.c long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
vcpu             1125 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu             1126 arch/powerpc/kvm/book3s_hv_nested.c 			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
vcpu             1133 arch/powerpc/kvm/book3s_hv_nested.c static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
vcpu             1141 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
vcpu             1155 arch/powerpc/kvm/book3s_hv_nested.c 			vcpu->arch.fault_gpa = fault_addr;
vcpu             1169 arch/powerpc/kvm/book3s_hv_nested.c 		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
vcpu             1187 arch/powerpc/kvm/book3s_hv_nested.c 	vcpu->arch.fault_dsisr = flags;
vcpu             1188 arch/powerpc/kvm/book3s_hv_nested.c 	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
vcpu             1189 arch/powerpc/kvm/book3s_hv_nested.c 		vcpu->arch.shregs.msr &= ~0x783f0000ul;
vcpu             1190 arch/powerpc/kvm/book3s_hv_nested.c 		vcpu->arch.shregs.msr |= flags;
vcpu             1195 arch/powerpc/kvm/book3s_hv_nested.c static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
vcpu             1201 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1261 arch/powerpc/kvm/book3s_hv_nested.c 					  struct kvm_vcpu *vcpu,
vcpu             1264 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1270 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long dsisr = vcpu->arch.fault_dsisr;
vcpu             1271 arch/powerpc/kvm/book3s_hv_nested.c 	unsigned long ea = vcpu->arch.fault_dar;
vcpu             1287 arch/powerpc/kvm/book3s_hv_nested.c 	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
vcpu             1290 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
vcpu             1306 arch/powerpc/kvm/book3s_hv_nested.c 		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
vcpu             1339 arch/powerpc/kvm/book3s_hv_nested.c 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
vcpu             1344 arch/powerpc/kvm/book3s_hv_nested.c 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
vcpu             1349 arch/powerpc/kvm/book3s_hv_nested.c 			kvmppc_core_queue_data_storage(vcpu, ea,
vcpu             1374 arch/powerpc/kvm/book3s_hv_nested.c 		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
vcpu             1427 arch/powerpc/kvm/book3s_hv_nested.c 	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
vcpu             1431 arch/powerpc/kvm/book3s_hv_nested.c long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu             1433 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm_nested_guest *gp = vcpu->arch.nested;
vcpu             1437 arch/powerpc/kvm/book3s_hv_nested.c 	ret = __kvmhv_nested_page_fault(run, vcpu, gp);
vcpu               36 arch/powerpc/kvm/book3s_hv_ras.c static void reload_slb(struct kvm_vcpu *vcpu)
vcpu               45 arch/powerpc/kvm/book3s_hv_ras.c 	slb = vcpu->arch.slb_shadow.pinned_addr;
vcpu               51 arch/powerpc/kvm/book3s_hv_ras.c 	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
vcpu               68 arch/powerpc/kvm/book3s_hv_ras.c static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
vcpu               70 arch/powerpc/kvm/book3s_hv_ras.c 	unsigned long srr1 = vcpu->arch.shregs.msr;
vcpu               76 arch/powerpc/kvm/book3s_hv_ras.c 		unsigned long dsisr = vcpu->arch.shregs.dsisr;
vcpu               81 arch/powerpc/kvm/book3s_hv_ras.c 			reload_slb(vcpu);
vcpu               86 arch/powerpc/kvm/book3s_hv_ras.c 			tlbiel_all_lpid(vcpu->kvm->arch.radix);
vcpu              100 arch/powerpc/kvm/book3s_hv_ras.c 		reload_slb(vcpu);
vcpu              103 arch/powerpc/kvm/book3s_hv_ras.c 		tlbiel_all_lpid(vcpu->kvm->arch.radix);
vcpu              122 arch/powerpc/kvm/book3s_hv_ras.c 	vcpu->arch.mce_evt = mce_evt;
vcpu              125 arch/powerpc/kvm/book3s_hv_ras.c void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
vcpu              127 arch/powerpc/kvm/book3s_hv_ras.c 	kvmppc_realmode_mc_power7(vcpu);
vcpu              415 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              418 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
vcpu              419 arch/powerpc/kvm/book3s_hv_rm_mmu.c 				 vcpu->arch.pgdir, true,
vcpu              420 arch/powerpc/kvm/book3s_hv_rm_mmu.c 				 &vcpu->arch.regs.gpr[4]);
vcpu              560 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              563 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
vcpu              564 arch/powerpc/kvm/book3s_hv_rm_mmu.c 				  &vcpu->arch.regs.gpr[4]);
vcpu              567 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
vcpu              569 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              570 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned long *args = &vcpu->arch.regs.gpr[4];
vcpu              685 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              689 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              756 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              759 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              790 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		vcpu->arch.regs.gpr[4 + i * 2] = v;
vcpu              791 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		vcpu->arch.regs.gpr[5 + i * 2] = r;
vcpu              796 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              799 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              837 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.regs.gpr[4] = gr;
vcpu              844 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu              847 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              884 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.regs.gpr[4] = gr;
vcpu              891 arch/powerpc/kvm/book3s_hv_rm_mmu.c static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
vcpu              895 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              911 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
vcpu              933 arch/powerpc/kvm/book3s_hv_rm_mmu.c static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
vcpu              937 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              946 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
vcpu              967 arch/powerpc/kvm/book3s_hv_rm_mmu.c static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
vcpu              972 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu              979 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
vcpu              982 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
vcpu             1003 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu             1006 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1025 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
vcpu             1027 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
vcpu             1080 arch/powerpc/kvm/book3s_hv_rm_mmu.c static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
vcpu             1088 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		entry = &vcpu->arch.mmio_cache.entry[i];
vcpu             1100 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
vcpu             1102 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	unsigned int index = vcpu->arch.mmio_cache.index;
vcpu             1104 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.mmio_cache.index++;
vcpu             1105 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
vcpu             1106 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		vcpu->arch.mmio_cache.index = 0;
vcpu             1108 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return &vcpu->arch.mmio_cache.entry[index];
vcpu             1208 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
vcpu             1211 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1226 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
vcpu             1259 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
vcpu             1276 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
vcpu             1277 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
vcpu             1285 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.pgfault_addr = addr;
vcpu             1286 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.pgfault_index = index;
vcpu             1287 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.pgfault_hpte[0] = v;
vcpu             1288 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.pgfault_hpte[1] = r;
vcpu             1289 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	vcpu->arch.pgfault_cache = cache_entry;
vcpu             1302 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			cache_entry = next_mmio_cache_entry(vcpu);
vcpu             1312 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		if (data && (vcpu->arch.shregs.msr & MSR_IR))
vcpu               54 arch/powerpc/kvm/book3s_hv_rm_xics.c static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
vcpu               59 arch/powerpc/kvm/book3s_hv_rm_xics.c 	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
vcpu               66 arch/powerpc/kvm/book3s_hv_rm_xics.c static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
vcpu              127 arch/powerpc/kvm/book3s_hv_rm_xics.c static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
vcpu              135 arch/powerpc/kvm/book3s_hv_rm_xics.c 	vcpu->stat.queue_intr++;
vcpu              136 arch/powerpc/kvm/book3s_hv_rm_xics.c 	set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
vcpu              139 arch/powerpc/kvm/book3s_hv_rm_xics.c 	if (vcpu == this_vcpu) {
vcpu              147 arch/powerpc/kvm/book3s_hv_rm_xics.c 		this_icp->rm_kick_target = vcpu;
vcpu              156 arch/powerpc/kvm/book3s_hv_rm_xics.c 	cpu = vcpu->arch.thread_cpu;
vcpu              162 arch/powerpc/kvm/book3s_hv_rm_xics.c 			icp_send_hcore_msg(hcore, vcpu);
vcpu              165 arch/powerpc/kvm/book3s_hv_rm_xics.c 			this_icp->rm_kick_target = vcpu;
vcpu              174 arch/powerpc/kvm/book3s_hv_rm_xics.c static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
vcpu              177 arch/powerpc/kvm/book3s_hv_rm_xics.c 	clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
vcpu              212 arch/powerpc/kvm/book3s_hv_rm_xics.c 		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);
vcpu              216 arch/powerpc/kvm/book3s_hv_rm_xics.c 	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;
vcpu              490 arch/powerpc/kvm/book3s_hv_rm_xics.c unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
vcpu              493 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              494 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              501 arch/powerpc/kvm/book3s_hv_rm_xics.c 	icp_rm_clr_vcpu_irq(icp->vcpu);
vcpu              523 arch/powerpc/kvm/book3s_hv_rm_xics.c 	vcpu->arch.regs.gpr[4] = xirr;
vcpu              528 arch/powerpc/kvm/book3s_hv_rm_xics.c int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              532 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              533 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
vcpu              545 arch/powerpc/kvm/book3s_hv_rm_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
vcpu              615 arch/powerpc/kvm/book3s_hv_rm_xics.c int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
vcpu              618 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              619 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              649 arch/powerpc/kvm/book3s_hv_rm_xics.c 	icp_rm_clr_vcpu_irq(icp->vcpu);
vcpu              677 arch/powerpc/kvm/book3s_hv_rm_xics.c static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
vcpu              679 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              680 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              711 arch/powerpc/kvm/book3s_hv_rm_xics.c 	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
vcpu              717 arch/powerpc/kvm/book3s_hv_rm_xics.c 		++vcpu->stat.pthru_all;
vcpu              722 arch/powerpc/kvm/book3s_hv_rm_xics.c 			++vcpu->stat.pthru_host;
vcpu              724 arch/powerpc/kvm/book3s_hv_rm_xics.c 				++vcpu->stat.pthru_bad_aff;
vcpu              735 arch/powerpc/kvm/book3s_hv_rm_xics.c int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
vcpu              737 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              738 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              764 arch/powerpc/kvm/book3s_hv_rm_xics.c 	return ics_rm_eoi(vcpu, irq);
vcpu              853 arch/powerpc/kvm/book3s_hv_rm_xics.c long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
vcpu              868 arch/powerpc/kvm/book3s_hv_rm_xics.c 	xics = vcpu->kvm->arch.xics;
vcpu              869 arch/powerpc/kvm/book3s_hv_rm_xics.c 	icp = vcpu->arch.icp;
vcpu               14 arch/powerpc/kvm/book3s_hv_tm.c static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
vcpu               17 arch/powerpc/kvm/book3s_hv_tm.c 	u64 msr = vcpu->arch.shregs.msr;
vcpu               19 arch/powerpc/kvm/book3s_hv_tm.c 	tfiar = vcpu->arch.regs.nip & ~0x3ull;
vcpu               21 arch/powerpc/kvm/book3s_hv_tm.c 	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
vcpu               27 arch/powerpc/kvm/book3s_hv_tm.c 	vcpu->arch.tfiar = tfiar;
vcpu               29 arch/powerpc/kvm/book3s_hv_tm.c 	vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr;
vcpu               40 arch/powerpc/kvm/book3s_hv_tm.c int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu               42 arch/powerpc/kvm/book3s_hv_tm.c 	u32 instr = vcpu->arch.emul_inst;
vcpu               43 arch/powerpc/kvm/book3s_hv_tm.c 	u64 msr = vcpu->arch.shregs.msr;
vcpu               50 arch/powerpc/kvm/book3s_hv_tm.c 		newmsr = vcpu->arch.shregs.srr1;
vcpu               56 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr = newmsr;
vcpu               57 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu               58 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
vcpu               62 arch/powerpc/kvm/book3s_hv_tm.c 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
vcpu               64 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu               68 arch/powerpc/kvm/book3s_hv_tm.c 		if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
vcpu               70 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu               73 arch/powerpc/kvm/book3s_hv_tm.c 		if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
vcpu               75 arch/powerpc/kvm/book3s_hv_tm.c 			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
vcpu               77 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
vcpu               80 arch/powerpc/kvm/book3s_hv_tm.c 		bescr = vcpu->arch.bescr;
vcpu               87 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.bescr = bescr;
vcpu               89 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr = msr;
vcpu               90 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu               91 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.regs.nip = vcpu->arch.ebbrr;
vcpu               97 arch/powerpc/kvm/book3s_hv_tm.c 		newmsr = kvmppc_get_gpr(vcpu, rs);
vcpu              105 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr = newmsr;
vcpu              110 arch/powerpc/kvm/book3s_hv_tm.c 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
vcpu              112 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              116 arch/powerpc/kvm/book3s_hv_tm.c 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
vcpu              118 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              123 arch/powerpc/kvm/book3s_hv_tm.c 			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
vcpu              125 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_book3s_queue_irqprio(vcpu,
vcpu              130 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
vcpu              140 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr = msr;
vcpu              145 arch/powerpc/kvm/book3s_hv_tm.c 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
vcpu              147 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              152 arch/powerpc/kvm/book3s_hv_tm.c 			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
vcpu              154 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_book3s_queue_irqprio(vcpu,
vcpu              160 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
vcpu              164 arch/powerpc/kvm/book3s_hv_tm.c 		if (!(vcpu->arch.orig_texasr & TEXASR_FS)) {
vcpu              167 arch/powerpc/kvm/book3s_hv_tm.c 				ra = kvmppc_get_gpr(vcpu, ra) & 0xff;
vcpu              168 arch/powerpc/kvm/book3s_hv_tm.c 			emulate_tx_failure(vcpu, ra);
vcpu              171 arch/powerpc/kvm/book3s_hv_tm.c 		copy_from_checkpoint(vcpu);
vcpu              174 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
vcpu              176 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
vcpu              182 arch/powerpc/kvm/book3s_hv_tm.c 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
vcpu              184 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              189 arch/powerpc/kvm/book3s_hv_tm.c 			vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
vcpu              191 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_book3s_queue_irqprio(vcpu,
vcpu              196 arch/powerpc/kvm/book3s_hv_tm.c 		if (MSR_TM_ACTIVE(msr) || !(vcpu->arch.texasr & TEXASR_FS)) {
vcpu              197 arch/powerpc/kvm/book3s_hv_tm.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
vcpu              201 arch/powerpc/kvm/book3s_hv_tm.c 		copy_to_checkpoint(vcpu);
vcpu              204 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
vcpu              206 arch/powerpc/kvm/book3s_hv_tm.c 		vcpu->arch.shregs.msr = msr | MSR_TS_S;
vcpu               20 arch/powerpc/kvm/book3s_hv_tm_builtin.c int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
vcpu               22 arch/powerpc/kvm/book3s_hv_tm_builtin.c 	u32 instr = vcpu->arch.emul_inst;
vcpu               29 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		newmsr = vcpu->arch.shregs.srr1;
vcpu               34 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.shregs.msr = newmsr;
vcpu               35 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu               36 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
vcpu               41 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		msr = vcpu->arch.shregs.msr;
vcpu               42 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
vcpu               45 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
vcpu               57 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.shregs.msr = msr;
vcpu               58 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
vcpu               59 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
vcpu               65 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		newmsr = kvmppc_get_gpr(vcpu, rs);
vcpu               66 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		msr = vcpu->arch.shregs.msr;
vcpu               73 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.shregs.msr = newmsr;
vcpu               78 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		msr = vcpu->arch.shregs.msr;
vcpu               80 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
vcpu               83 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
vcpu               87 arch/powerpc/kvm/book3s_hv_tm_builtin.c 			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu               89 arch/powerpc/kvm/book3s_hv_tm_builtin.c 		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
vcpu              101 arch/powerpc/kvm/book3s_hv_tm_builtin.c void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
vcpu              103 arch/powerpc/kvm/book3s_hv_tm_builtin.c 	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
vcpu              104 arch/powerpc/kvm/book3s_hv_tm_builtin.c 	vcpu->arch.regs.nip = vcpu->arch.tfhar;
vcpu              105 arch/powerpc/kvm/book3s_hv_tm_builtin.c 	copy_from_checkpoint(vcpu);
vcpu              106 arch/powerpc/kvm/book3s_hv_tm_builtin.c 	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
vcpu               56 arch/powerpc/kvm/book3s_mmu_hpte.c void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
vcpu               59 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              101 arch/powerpc/kvm/book3s_mmu_hpte.c static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
vcpu              103 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              108 arch/powerpc/kvm/book3s_mmu_hpte.c 	kvmppc_mmu_invalidate_pte(vcpu, pte);
vcpu              132 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
vcpu              134 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              144 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              150 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
vcpu              152 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              164 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              169 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
vcpu              171 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              184 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              189 arch/powerpc/kvm/book3s_mmu_hpte.c void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
vcpu              191 arch/powerpc/kvm/book3s_mmu_hpte.c 	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
vcpu              196 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
vcpu              199 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
vcpu              203 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_flush_all(vcpu);
vcpu              212 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
vcpu              214 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              226 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              233 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
vcpu              235 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              248 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              255 arch/powerpc/kvm/book3s_mmu_hpte.c static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
vcpu              257 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              270 arch/powerpc/kvm/book3s_mmu_hpte.c 			invalidate_pte(vcpu, pte);
vcpu              275 arch/powerpc/kvm/book3s_mmu_hpte.c void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
vcpu              277 arch/powerpc/kvm/book3s_mmu_hpte.c 	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
vcpu              282 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
vcpu              286 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
vcpu              290 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
vcpu              298 arch/powerpc/kvm/book3s_mmu_hpte.c void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
vcpu              300 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              304 arch/powerpc/kvm/book3s_mmu_hpte.c 	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);
vcpu              314 arch/powerpc/kvm/book3s_mmu_hpte.c 				invalidate_pte(vcpu, pte);
vcpu              320 arch/powerpc/kvm/book3s_mmu_hpte.c struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
vcpu              322 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              326 arch/powerpc/kvm/book3s_mmu_hpte.c 		kvmppc_mmu_pte_flush_all(vcpu);
vcpu              338 arch/powerpc/kvm/book3s_mmu_hpte.c void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
vcpu              340 arch/powerpc/kvm/book3s_mmu_hpte.c 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu              351 arch/powerpc/kvm/book3s_mmu_hpte.c int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
vcpu              353 arch/powerpc/kvm/book3s_mmu_hpte.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu              150 arch/powerpc/kvm/book3s_paired_singles.c static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
vcpu              152 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
vcpu              155 arch/powerpc/kvm/book3s_paired_singles.c static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
vcpu              158 arch/powerpc/kvm/book3s_paired_singles.c 	u64 msr = kvmppc_get_msr(vcpu);
vcpu              162 arch/powerpc/kvm/book3s_paired_singles.c 	kvmppc_set_msr(vcpu, msr);
vcpu              163 arch/powerpc/kvm/book3s_paired_singles.c 	kvmppc_set_dar(vcpu, eaddr);
vcpu              168 arch/powerpc/kvm/book3s_paired_singles.c 	kvmppc_set_dsisr(vcpu, dsisr);
vcpu              169 arch/powerpc/kvm/book3s_paired_singles.c 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
vcpu              172 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              184 arch/powerpc/kvm/book3s_paired_singles.c 	r = kvmppc_ld(vcpu, &addr, len, tmp, true);
vcpu              185 arch/powerpc/kvm/book3s_paired_singles.c 	vcpu->arch.paddr_accessed = addr;
vcpu              188 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_inject_pf(vcpu, addr, false);
vcpu              191 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
vcpu              201 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
vcpu              202 arch/powerpc/kvm/book3s_paired_singles.c 		vcpu->arch.qpr[rs] = *((u32*)tmp);
vcpu              205 arch/powerpc/kvm/book3s_paired_singles.c 		VCPU_FPR(vcpu, rs) = *((u64*)tmp);
vcpu              216 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              227 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
vcpu              232 arch/powerpc/kvm/book3s_paired_singles.c 		*((u32*)tmp) = VCPU_FPR(vcpu, rs);
vcpu              233 arch/powerpc/kvm/book3s_paired_singles.c 		val = VCPU_FPR(vcpu, rs) & 0xffffffff;
vcpu              237 arch/powerpc/kvm/book3s_paired_singles.c 		*((u64*)tmp) = VCPU_FPR(vcpu, rs);
vcpu              238 arch/powerpc/kvm/book3s_paired_singles.c 		val = VCPU_FPR(vcpu, rs);
vcpu              246 arch/powerpc/kvm/book3s_paired_singles.c 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
vcpu              247 arch/powerpc/kvm/book3s_paired_singles.c 	vcpu->arch.paddr_accessed = addr;
vcpu              249 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_inject_pf(vcpu, addr, true);
vcpu              251 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
vcpu              262 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              272 arch/powerpc/kvm/book3s_paired_singles.c 		r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true);
vcpu              275 arch/powerpc/kvm/book3s_paired_singles.c 		r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true);
vcpu              277 arch/powerpc/kvm/book3s_paired_singles.c 	vcpu->arch.paddr_accessed = addr;
vcpu              279 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_inject_pf(vcpu, addr, false);
vcpu              282 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
vcpu              284 arch/powerpc/kvm/book3s_paired_singles.c 		vcpu->arch.qpr[rs] = tmp[1];
vcpu              287 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
vcpu              295 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
vcpu              296 arch/powerpc/kvm/book3s_paired_singles.c 	vcpu->arch.qpr[rs] = tmp[1];
vcpu              305 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              313 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
vcpu              314 arch/powerpc/kvm/book3s_paired_singles.c 	tmp[1] = vcpu->arch.qpr[rs];
vcpu              316 arch/powerpc/kvm/book3s_paired_singles.c 	r = kvmppc_st(vcpu, &addr, len, tmp, true);
vcpu              317 arch/powerpc/kvm/book3s_paired_singles.c 	vcpu->arch.paddr_accessed = addr;
vcpu              319 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_inject_pf(vcpu, addr, true);
vcpu              321 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
vcpu              324 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
vcpu              344 arch/powerpc/kvm/book3s_paired_singles.c static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
vcpu              346 arch/powerpc/kvm/book3s_paired_singles.c 	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
vcpu              490 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc,
vcpu              497 arch/powerpc/kvm/book3s_paired_singles.c 	u32 *qpr = vcpu->arch.qpr;
vcpu              506 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
vcpu              507 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
vcpu              508 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
vcpu              513 arch/powerpc/kvm/book3s_paired_singles.c 	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
vcpu              519 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
vcpu              530 arch/powerpc/kvm/book3s_paired_singles.c 		func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
vcpu              538 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc,
vcpu              545 arch/powerpc/kvm/book3s_paired_singles.c 	u32 *qpr = vcpu->arch.qpr;
vcpu              555 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
vcpu              560 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
vcpu              562 arch/powerpc/kvm/book3s_paired_singles.c 	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
vcpu              568 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
vcpu              578 arch/powerpc/kvm/book3s_paired_singles.c 	func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
vcpu              590 arch/powerpc/kvm/book3s_paired_singles.c static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
vcpu              595 arch/powerpc/kvm/book3s_paired_singles.c 	u32 *qpr = vcpu->arch.qpr;
vcpu              603 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
vcpu              604 arch/powerpc/kvm/book3s_paired_singles.c 	func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
vcpu              609 arch/powerpc/kvm/book3s_paired_singles.c 	kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
vcpu              613 arch/powerpc/kvm/book3s_paired_singles.c 	func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
vcpu              621 arch/powerpc/kvm/book3s_paired_singles.c int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu              635 arch/powerpc/kvm/book3s_paired_singles.c 	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
vcpu              645 arch/powerpc/kvm/book3s_paired_singles.c 	fpr_d = &VCPU_FPR(vcpu, ax_rd);
vcpu              646 arch/powerpc/kvm/book3s_paired_singles.c 	fpr_a = &VCPU_FPR(vcpu, ax_ra);
vcpu              647 arch/powerpc/kvm/book3s_paired_singles.c 	fpr_b = &VCPU_FPR(vcpu, ax_rb);
vcpu              648 arch/powerpc/kvm/book3s_paired_singles.c 	fpr_c = &VCPU_FPR(vcpu, ax_rc);
vcpu              651 arch/powerpc/kvm/book3s_paired_singles.c 	cr = kvmppc_get_cr(vcpu);
vcpu              653 arch/powerpc/kvm/book3s_paired_singles.c 	if (!kvmppc_inst_is_paired_single(vcpu, inst))
vcpu              656 arch/powerpc/kvm/book3s_paired_singles.c 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
vcpu              657 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
vcpu              661 arch/powerpc/kvm/book3s_paired_singles.c 	kvmppc_giveup_ext(vcpu, MSR_FP);
vcpu              667 arch/powerpc/kvm/book3s_paired_singles.c 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
vcpu              669 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
vcpu              671 arch/powerpc/kvm/book3s_paired_singles.c 			i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
vcpu              678 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
vcpu              683 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
vcpu              688 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
vcpu              693 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
vcpu              696 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              701 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
vcpu              706 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
vcpu              711 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
vcpu              716 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
vcpu              719 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              731 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
vcpu              735 arch/powerpc/kvm/book3s_paired_singles.c 			addr += kvmppc_get_gpr(vcpu, ax_rb);
vcpu              736 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
vcpu              745 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
vcpu              749 arch/powerpc/kvm/book3s_paired_singles.c 			addr += kvmppc_get_gpr(vcpu, ax_rb);
vcpu              750 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
vcpu              753 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              757 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu              758 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
vcpu              759 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              760 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
vcpu              768 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu              769 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              777 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu              778 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
vcpu              779 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              780 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] |= 0x80000000;
vcpu              784 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
vcpu              785 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
vcpu              786 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              787 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
vcpu              791 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
vcpu              793 arch/powerpc/kvm/book3s_paired_singles.c 			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
vcpu              794 arch/powerpc/kvm/book3s_paired_singles.c 				   &vcpu->arch.qpr[ax_rd]);
vcpu              798 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
vcpu              799 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              804 arch/powerpc/kvm/book3s_paired_singles.c 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
vcpu              805 arch/powerpc/kvm/book3s_paired_singles.c 				   &VCPU_FPR(vcpu, ax_rd));
vcpu              807 arch/powerpc/kvm/book3s_paired_singles.c 			kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
vcpu              808 arch/powerpc/kvm/book3s_paired_singles.c 				   &vcpu->arch.qpr[ax_rd]);
vcpu              813 arch/powerpc/kvm/book3s_paired_singles.c 			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
vcpu              814 arch/powerpc/kvm/book3s_paired_singles.c 				   &VCPU_FPR(vcpu, ax_rd));
vcpu              815 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
vcpu              822 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
vcpu              826 arch/powerpc/kvm/book3s_paired_singles.c 			addr += kvmppc_get_gpr(vcpu, ax_rb);
vcpu              827 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
vcpu              832 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
vcpu              836 arch/powerpc/kvm/book3s_paired_singles.c 			addr += kvmppc_get_gpr(vcpu, ax_rb);
vcpu              837 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
vcpu              840 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              847 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              849 arch/powerpc/kvm/book3s_paired_singles.c 			VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
vcpu              852 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              854 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
vcpu              857 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              861 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              865 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              869 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              873 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              877 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              881 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              885 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              889 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
vcpu              893 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
vcpu              897 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
vcpu              901 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              905 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              909 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              913 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
vcpu              923 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
vcpu              925 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
vcpu              931 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
vcpu              933 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
vcpu              937 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              942 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
vcpu              944 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
vcpu              950 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
vcpu              952 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
vcpu              956 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              961 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
vcpu              963 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
vcpu              969 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
vcpu              971 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
vcpu              975 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu              980 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
vcpu              982 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
vcpu              988 arch/powerpc/kvm/book3s_paired_singles.c 		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
vcpu              990 arch/powerpc/kvm/book3s_paired_singles.c 		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
vcpu              994 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu             1001 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
vcpu             1003 arch/powerpc/kvm/book3s_paired_singles.c 			addr += kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1004 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
vcpu             1010 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
vcpu             1011 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1013 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
vcpu             1017 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu             1022 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
vcpu             1023 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1025 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
vcpu             1031 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
vcpu             1032 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1034 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
vcpu             1038 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu             1043 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
vcpu             1044 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1046 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
vcpu             1052 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
vcpu             1053 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1055 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
vcpu             1059 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu             1064 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
vcpu             1065 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1067 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
vcpu             1073 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
vcpu             1074 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1076 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
vcpu             1080 arch/powerpc/kvm/book3s_paired_singles.c 				kvmppc_set_gpr(vcpu, ax_ra, addr);
vcpu             1085 arch/powerpc/kvm/book3s_paired_singles.c 			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
vcpu             1086 arch/powerpc/kvm/book3s_paired_singles.c 				     kvmppc_get_gpr(vcpu, ax_rb);
vcpu             1088 arch/powerpc/kvm/book3s_paired_singles.c 			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
vcpu             1099 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1100 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1103 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1104 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1107 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1108 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1111 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1112 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1115 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1116 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1121 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
vcpu             1122 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1125 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1126 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1129 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1130 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1133 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1134 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1137 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1138 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1152 arch/powerpc/kvm/book3s_paired_singles.c 			*fpr_d = vcpu->arch.fp.fpscr;
vcpu             1157 arch/powerpc/kvm/book3s_paired_singles.c 			vcpu->arch.fp.fpscr = *fpr_b;
vcpu             1165 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
vcpu             1176 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
vcpu             1182 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1188 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1191 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1194 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1197 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1200 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
vcpu             1203 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1206 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1209 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1210 arch/powerpc/kvm/book3s_paired_singles.c 			kvmppc_sync_qpr(vcpu, ax_rd);
vcpu             1217 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
vcpu             1219 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
vcpu             1225 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
vcpu             1228 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1231 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1234 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1237 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1240 arch/powerpc/kvm/book3s_paired_singles.c 			fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
vcpu             1247 arch/powerpc/kvm/book3s_paired_singles.c 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
vcpu             1249 arch/powerpc/kvm/book3s_paired_singles.c 		kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
vcpu             1255 arch/powerpc/kvm/book3s_paired_singles.c 		kvmppc_set_cr(vcpu, cr);
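The book3s_paired_singles.c entries above show the emulation keeping every guest FPR paired with a 32-bit QPR shadow and operating on both halves at once. A minimal standalone sketch of that mirroring, using toy types rather than the kernel's VCPU_FPR()/qpr[] accessors (the abs-style sign-clearing case from the top of the listing):

#include <stdint.h>
#include <stdio.h>

/* Toy paired-single register: a 64-bit primary half and a 32-bit secondary
 * half, standing in for VCPU_FPR(vcpu, n) and vcpu->arch.qpr[n] above. */
struct ps_reg {
	uint64_t fpr;	/* primary slot (double precision) */
	uint32_t qpr;	/* secondary slot (single precision) */
};

/* abs-style operation: copy the source and clear the sign bit of both
 * halves, mirroring the "&= ~0x8000..." pair near the top of the listing. */
static void ps_abs(struct ps_reg *d, const struct ps_reg *b)
{
	d->fpr = b->fpr & ~0x8000000000000000ULL;
	d->qpr = b->qpr & ~0x80000000U;
}

int main(void)
{
	struct ps_reg b = { 0xC000000000000000ULL, 0xC0000000U };	/* -2.0 / -2.0f */
	struct ps_reg d;

	ps_abs(&d, &b);
	printf("fpr=%016llx qpr=%08x\n", (unsigned long long)d.fpr, d.qpr);
	return 0;
}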
vcpu               52 arch/powerpc/kvm/book3s_pr.c static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
vcpu               55 arch/powerpc/kvm/book3s_pr.c static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
vcpu               66 arch/powerpc/kvm/book3s_pr.c static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
vcpu               68 arch/powerpc/kvm/book3s_pr.c 	ulong msr = kvmppc_get_msr(vcpu);
vcpu               72 arch/powerpc/kvm/book3s_pr.c static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
vcpu               74 arch/powerpc/kvm/book3s_pr.c 	ulong msr = kvmppc_get_msr(vcpu);
vcpu               75 arch/powerpc/kvm/book3s_pr.c 	ulong pc = kvmppc_get_pc(vcpu);
vcpu               82 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
vcpu               89 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
vcpu               90 arch/powerpc/kvm/book3s_pr.c 	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
vcpu               93 arch/powerpc/kvm/book3s_pr.c void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
vcpu               95 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
vcpu               98 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu               99 arch/powerpc/kvm/book3s_pr.c 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
vcpu              100 arch/powerpc/kvm/book3s_pr.c 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
vcpu              110 arch/powerpc/kvm/book3s_pr.c 	vcpu->cpu = smp_processor_id();
vcpu              112 arch/powerpc/kvm/book3s_pr.c 	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
vcpu              115 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_is_split_real(vcpu))
vcpu              116 arch/powerpc/kvm/book3s_pr.c 		kvmppc_fixup_split_real(vcpu);
vcpu              118 arch/powerpc/kvm/book3s_pr.c 	kvmppc_restore_tm_pr(vcpu);
vcpu              121 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
vcpu              124 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              126 arch/powerpc/kvm/book3s_pr.c 		kvmppc_copy_from_svcpu(vcpu);
vcpu              128 arch/powerpc/kvm/book3s_pr.c 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
vcpu              129 arch/powerpc/kvm/book3s_pr.c 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
vcpu              133 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_is_split_real(vcpu))
vcpu              134 arch/powerpc/kvm/book3s_pr.c 		kvmppc_unfixup_split_real(vcpu);
vcpu              136 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
vcpu              137 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu              138 arch/powerpc/kvm/book3s_pr.c 	kvmppc_save_tm_pr(vcpu);
vcpu              145 arch/powerpc/kvm/book3s_pr.c 	vcpu->cpu = -1;
vcpu              149 arch/powerpc/kvm/book3s_pr.c void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
vcpu              151 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              153 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
vcpu              154 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
vcpu              155 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
vcpu              156 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
vcpu              157 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
vcpu              158 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
vcpu              159 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
vcpu              160 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
vcpu              161 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
vcpu              162 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
vcpu              163 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
vcpu              164 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
vcpu              165 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
vcpu              166 arch/powerpc/kvm/book3s_pr.c 	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
vcpu              167 arch/powerpc/kvm/book3s_pr.c 	svcpu->cr  = vcpu->arch.regs.ccr;
vcpu              168 arch/powerpc/kvm/book3s_pr.c 	svcpu->xer = vcpu->arch.regs.xer;
vcpu              169 arch/powerpc/kvm/book3s_pr.c 	svcpu->ctr = vcpu->arch.regs.ctr;
vcpu              170 arch/powerpc/kvm/book3s_pr.c 	svcpu->lr  = vcpu->arch.regs.link;
vcpu              171 arch/powerpc/kvm/book3s_pr.c 	svcpu->pc  = vcpu->arch.regs.nip;
vcpu              173 arch/powerpc/kvm/book3s_pr.c 	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
vcpu              179 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.entry_tb = get_tb();
vcpu              180 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.entry_vtb = get_vtb();
vcpu              182 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.entry_ic = mfspr(SPRN_IC);
vcpu              188 arch/powerpc/kvm/book3s_pr.c static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
vcpu              190 arch/powerpc/kvm/book3s_pr.c 	ulong guest_msr = kvmppc_get_msr(vcpu);
vcpu              203 arch/powerpc/kvm/book3s_pr.c 	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
vcpu              217 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shadow_msr = smsr;
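The kvmppc_recalc_shadow_msr() entries above derive the MSR the guest really runs under: harmless bits pass straight through, while FP/VEC/VSX are only reflected when the guest currently owns that unit. A simplified sketch of that derivation; the pass-through mask here is a placeholder, not the exact set book3s_pr.c uses:

#include <stdint.h>

#define MSR_FP	0x0000000000002000ULL	/* architectural MSR bits */
#define MSR_VEC	0x0000000002000000ULL
#define MSR_VSX	0x0000000000800000ULL

/* Placeholder for the bits the host always lets the guest drive (EE, SE, BE,
 * DE and friends); not the exact mask used by book3s_pr.c. */
#define SHADOW_PASSTHROUGH	0x000000000000807fULL

static uint64_t recalc_shadow_msr(uint64_t guest_msr, uint64_t guest_owned_ext)
{
	uint64_t smsr = guest_msr & SHADOW_PASSTHROUGH;

	/* FP/VEC/VSX stay off in the shadow MSR until kvmppc_handle_ext()
	 * (further down in the listing) has handed the unit to the guest. */
	smsr |= guest_msr & guest_owned_ext & (MSR_FP | MSR_VEC | MSR_VSX);
	return smsr;
}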
vcpu              221 arch/powerpc/kvm/book3s_pr.c void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
vcpu              223 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
vcpu              235 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
vcpu              236 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
vcpu              237 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
vcpu              238 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
vcpu              239 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
vcpu              240 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
vcpu              241 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
vcpu              242 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
vcpu              243 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
vcpu              244 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
vcpu              245 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
vcpu              246 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
vcpu              247 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
vcpu              248 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
vcpu              249 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.ccr  = svcpu->cr;
vcpu              250 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.xer = svcpu->xer;
vcpu              251 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.ctr = svcpu->ctr;
vcpu              252 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.link  = svcpu->lr;
vcpu              253 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.regs.nip  = svcpu->pc;
vcpu              254 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
vcpu              255 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.fault_dar   = svcpu->fault_dar;
vcpu              256 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
vcpu              257 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.last_inst   = svcpu->last_inst;
vcpu              259 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
vcpu              264 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
vcpu              265 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
vcpu              266 arch/powerpc/kvm/book3s_pr.c 	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
vcpu              268 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
vcpu              282 arch/powerpc/kvm/book3s_pr.c 	old_msr = kvmppc_get_msr(vcpu);
vcpu              284 arch/powerpc/kvm/book3s_pr.c 		(vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
vcpu              287 arch/powerpc/kvm/book3s_pr.c 		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
vcpu              288 arch/powerpc/kvm/book3s_pr.c 		kvmppc_set_msr_fast(vcpu, old_msr);
vcpu              289 arch/powerpc/kvm/book3s_pr.c 		kvmppc_recalc_shadow_msr(vcpu);
vcpu              300 arch/powerpc/kvm/book3s_pr.c void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
vcpu              303 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
vcpu              304 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
vcpu              305 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
vcpu              309 arch/powerpc/kvm/book3s_pr.c void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
vcpu              312 arch/powerpc/kvm/book3s_pr.c 	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
vcpu              313 arch/powerpc/kvm/book3s_pr.c 	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
vcpu              314 arch/powerpc/kvm/book3s_pr.c 	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
vcpu              321 arch/powerpc/kvm/book3s_pr.c static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
vcpu              324 arch/powerpc/kvm/book3s_pr.c 	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
vcpu              337 arch/powerpc/kvm/book3s_pr.c 	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
vcpu              340 arch/powerpc/kvm/book3s_pr.c void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
vcpu              342 arch/powerpc/kvm/book3s_pr.c 	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
vcpu              343 arch/powerpc/kvm/book3s_pr.c 		kvmppc_save_tm_sprs(vcpu);
vcpu              347 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu              348 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_ext(vcpu, MSR_VSX);
vcpu              351 arch/powerpc/kvm/book3s_pr.c 	_kvmppc_save_tm_pr(vcpu, mfmsr());
vcpu              355 arch/powerpc/kvm/book3s_pr.c void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
vcpu              357 arch/powerpc/kvm/book3s_pr.c 	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
vcpu              358 arch/powerpc/kvm/book3s_pr.c 		kvmppc_restore_tm_sprs(vcpu);
vcpu              359 arch/powerpc/kvm/book3s_pr.c 		if (kvmppc_get_msr(vcpu) & MSR_TM) {
vcpu              360 arch/powerpc/kvm/book3s_pr.c 			kvmppc_handle_lost_math_exts(vcpu);
vcpu              361 arch/powerpc/kvm/book3s_pr.c 			if (vcpu->arch.fscr & FSCR_TAR)
vcpu              362 arch/powerpc/kvm/book3s_pr.c 				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
vcpu              368 arch/powerpc/kvm/book3s_pr.c 	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
vcpu              371 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_get_msr(vcpu) & MSR_TM) {
vcpu              372 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_lost_math_exts(vcpu);
vcpu              373 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->arch.fscr & FSCR_TAR)
vcpu              374 arch/powerpc/kvm/book3s_pr.c 			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
vcpu              379 arch/powerpc/kvm/book3s_pr.c static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
vcpu              385 arch/powerpc/kvm/book3s_pr.c 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
vcpu              386 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu              396 arch/powerpc/kvm/book3s_pr.c 	struct kvm_vcpu *vcpu;
vcpu              416 arch/powerpc/kvm/book3s_pr.c 		kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              417 arch/powerpc/kvm/book3s_pr.c 			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
vcpu              451 arch/powerpc/kvm/book3s_pr.c static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
vcpu              456 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.papr_enabled)
vcpu              469 arch/powerpc/kvm/book3s_pr.c 		kvmppc_emulate_tabort(vcpu,
vcpu              473 arch/powerpc/kvm/book3s_pr.c 	old_msr = kvmppc_get_msr(vcpu);
vcpu              474 arch/powerpc/kvm/book3s_pr.c 	msr &= to_book3s(vcpu)->msr_mask;
vcpu              475 arch/powerpc/kvm/book3s_pr.c 	kvmppc_set_msr_fast(vcpu, msr);
vcpu              476 arch/powerpc/kvm/book3s_pr.c 	kvmppc_recalc_shadow_msr(vcpu);
vcpu              479 arch/powerpc/kvm/book3s_pr.c 		if (!vcpu->arch.pending_exceptions) {
vcpu              480 arch/powerpc/kvm/book3s_pr.c 			kvm_vcpu_block(vcpu);
vcpu              481 arch/powerpc/kvm/book3s_pr.c 			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              482 arch/powerpc/kvm/book3s_pr.c 			vcpu->stat.halt_wakeup++;
vcpu              486 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_msr_fast(vcpu, msr);
vcpu              490 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_is_split_real(vcpu))
vcpu              491 arch/powerpc/kvm/book3s_pr.c 		kvmppc_fixup_split_real(vcpu);
vcpu              493 arch/powerpc/kvm/book3s_pr.c 		kvmppc_unfixup_split_real(vcpu);
vcpu              495 arch/powerpc/kvm/book3s_pr.c 	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
vcpu              497 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_flush_segments(vcpu);
vcpu              498 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
vcpu              501 arch/powerpc/kvm/book3s_pr.c 		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
vcpu              502 arch/powerpc/kvm/book3s_pr.c 			struct kvm_vcpu_arch *a = &vcpu->arch;
vcpu              505 arch/powerpc/kvm/book3s_pr.c 				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
vcpu              507 arch/powerpc/kvm/book3s_pr.c 				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
vcpu              519 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.magic_page_pa &&
vcpu              522 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
vcpu              527 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_get_msr(vcpu) & MSR_FP)
vcpu              528 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
vcpu              531 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_get_msr(vcpu) & MSR_TM)
vcpu              532 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_lost_math_exts(vcpu);
vcpu              536 arch/powerpc/kvm/book3s_pr.c void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
vcpu              540 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
vcpu              541 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.pvr = pvr;
vcpu              544 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_book3s_64_init(vcpu);
vcpu              545 arch/powerpc/kvm/book3s_pr.c 		if (!to_book3s(vcpu)->hior_explicit)
vcpu              546 arch/powerpc/kvm/book3s_pr.c 			to_book3s(vcpu)->hior = 0xfff00000;
vcpu              547 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
vcpu              548 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.cpu_type = KVM_CPU_3S_64;
vcpu              552 arch/powerpc/kvm/book3s_pr.c 		kvmppc_mmu_book3s_32_init(vcpu);
vcpu              553 arch/powerpc/kvm/book3s_pr.c 		if (!to_book3s(vcpu)->hior_explicit)
vcpu              554 arch/powerpc/kvm/book3s_pr.c 			to_book3s(vcpu)->hior = 0;
vcpu              555 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
vcpu              556 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.cpu_type = KVM_CPU_3S_32;
vcpu              559 arch/powerpc/kvm/book3s_pr.c 	kvmppc_sanity_check(vcpu);
vcpu              563 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
vcpu              564 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
vcpu              566 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
vcpu              571 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
vcpu              588 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
vcpu              595 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
vcpu              610 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
vcpu              625 arch/powerpc/kvm/book3s_pr.c static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
vcpu              632 arch/powerpc/kvm/book3s_pr.c 	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
vcpu              652 arch/powerpc/kvm/book3s_pr.c static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
vcpu              654 arch/powerpc/kvm/book3s_pr.c 	ulong mp_pa = vcpu->arch.magic_page_pa;
vcpu              656 arch/powerpc/kvm/book3s_pr.c 	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
vcpu              664 arch/powerpc/kvm/book3s_pr.c 	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
vcpu              667 arch/powerpc/kvm/book3s_pr.c int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              676 arch/powerpc/kvm/book3s_pr.c 	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
vcpu              677 arch/powerpc/kvm/book3s_pr.c 	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
vcpu              681 arch/powerpc/kvm/book3s_pr.c 	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
vcpu              686 arch/powerpc/kvm/book3s_pr.c 		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
vcpu              698 arch/powerpc/kvm/book3s_pr.c 	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
vcpu              704 arch/powerpc/kvm/book3s_pr.c 		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
vcpu              709 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
vcpu              711 arch/powerpc/kvm/book3s_pr.c 		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
vcpu              722 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
vcpu              723 arch/powerpc/kvm/book3s_pr.c 	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
vcpu              741 arch/powerpc/kvm/book3s_pr.c 			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
vcpu              742 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
vcpu              744 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_inst_storage(vcpu, flags);
vcpu              748 arch/powerpc/kvm/book3s_pr.c 		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
vcpu              749 arch/powerpc/kvm/book3s_pr.c 		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
vcpu              750 arch/powerpc/kvm/book3s_pr.c 	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
vcpu              751 arch/powerpc/kvm/book3s_pr.c 		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
vcpu              757 arch/powerpc/kvm/book3s_pr.c 			kvmppc_mmu_unmap_page(vcpu, &pte);
vcpu              760 arch/powerpc/kvm/book3s_pr.c 		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
vcpu              766 arch/powerpc/kvm/book3s_pr.c 			vcpu->stat.sp_storage++;
vcpu              767 arch/powerpc/kvm/book3s_pr.c 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
vcpu              768 arch/powerpc/kvm/book3s_pr.c 			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
vcpu              769 arch/powerpc/kvm/book3s_pr.c 			kvmppc_patch_dcbz(vcpu, &pte);
vcpu              772 arch/powerpc/kvm/book3s_pr.c 		vcpu->stat.mmio_exits++;
vcpu              773 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.paddr_accessed = pte.raddr;
vcpu              774 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.vaddr_accessed = pte.eaddr;
vcpu              775 arch/powerpc/kvm/book3s_pr.c 		r = kvmppc_emulate_mmio(run, vcpu);
vcpu              784 arch/powerpc/kvm/book3s_pr.c void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
vcpu              795 arch/powerpc/kvm/book3s_pr.c 	msr &= vcpu->arch.guest_owned_ext;
vcpu              822 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
vcpu              823 arch/powerpc/kvm/book3s_pr.c 	kvmppc_recalc_shadow_msr(vcpu);
vcpu              827 arch/powerpc/kvm/book3s_pr.c void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
vcpu              830 arch/powerpc/kvm/book3s_pr.c 	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
vcpu              837 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.tar = mfspr(SPRN_TAR);
vcpu              839 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
vcpu              846 arch/powerpc/kvm/book3s_pr.c static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
vcpu              852 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
vcpu              855 arch/powerpc/kvm/book3s_pr.c 	if (!(kvmppc_get_msr(vcpu) & msr)) {
vcpu              856 arch/powerpc/kvm/book3s_pr.c 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
vcpu              866 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
vcpu              878 arch/powerpc/kvm/book3s_pr.c 	msr &= ~vcpu->arch.guest_owned_ext;
vcpu              889 arch/powerpc/kvm/book3s_pr.c 		load_fp_state(&vcpu->arch.fp);
vcpu              891 arch/powerpc/kvm/book3s_pr.c 		t->fp_save_area = &vcpu->arch.fp;
vcpu              899 arch/powerpc/kvm/book3s_pr.c 		load_vr_state(&vcpu->arch.vr);
vcpu              901 arch/powerpc/kvm/book3s_pr.c 		t->vr_save_area = &vcpu->arch.vr;
vcpu              907 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.guest_owned_ext |= msr;
vcpu              908 arch/powerpc/kvm/book3s_pr.c 	kvmppc_recalc_shadow_msr(vcpu);
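kvmppc_giveup_ext()/kvmppc_handle_ext() above implement lazy ownership of the FP and Altivec units: an unavailable-interrupt exit either reflects the fault to the guest or loads the real unit and records ownership, and vcpu_put gives the unit back and recomputes the shadow MSR. A toy model of just the ownership bookkeeping (no real register state):

#include <stdbool.h>
#include <stdint.h>

#define EXT_FP	(1u << 0)	/* toy unit masks, not MSR bits */
#define EXT_VEC	(1u << 1)

struct toy_vcpu {
	uint32_t guest_owned_ext;	/* units currently live on the host */
};

/* Unavailable-interrupt exit: reflect the fault if the guest has the unit
 * disabled, otherwise load it (load_fp_state()/load_vr_state() in the real
 * code) and remember that the guest now owns it. */
static bool handle_ext(struct toy_vcpu *v, uint32_t guest_msr_units, uint32_t unit)
{
	if (!(guest_msr_units & unit))
		return false;		/* queue the interrupt back to the guest */
	v->guest_owned_ext |= unit;
	return true;
}

/* vcpu_put / preemption path: save the unit back and drop ownership, after
 * which the shadow MSR is recalculated so the guest faults on next use. */
static void giveup_ext(struct toy_vcpu *v, uint32_t units)
{
	v->guest_owned_ext &= ~units;
}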
vcpu              917 arch/powerpc/kvm/book3s_pr.c static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
vcpu              921 arch/powerpc/kvm/book3s_pr.c 	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
vcpu              928 arch/powerpc/kvm/book3s_pr.c 		load_fp_state(&vcpu->arch.fp);
vcpu              936 arch/powerpc/kvm/book3s_pr.c 		load_vr_state(&vcpu->arch.vr);
vcpu              946 arch/powerpc/kvm/book3s_pr.c void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
vcpu              949 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.fscr &= ~(0xffULL << 56);
vcpu              950 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.fscr |= (fac << 56);
vcpu              951 arch/powerpc/kvm/book3s_pr.c 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
vcpu              954 arch/powerpc/kvm/book3s_pr.c static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
vcpu              958 arch/powerpc/kvm/book3s_pr.c 	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
vcpu              959 arch/powerpc/kvm/book3s_pr.c 		er = kvmppc_emulate_instruction(vcpu->run, vcpu);
vcpu              963 arch/powerpc/kvm/book3s_pr.c 		kvmppc_trigger_fac_interrupt(vcpu, fac);
vcpu              968 arch/powerpc/kvm/book3s_pr.c static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
vcpu              980 arch/powerpc/kvm/book3s_pr.c 		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
vcpu              983 arch/powerpc/kvm/book3s_pr.c 		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
vcpu              992 arch/powerpc/kvm/book3s_pr.c 		kvmppc_trigger_fac_interrupt(vcpu, fac);
vcpu             1000 arch/powerpc/kvm/book3s_pr.c 		mtspr(SPRN_TAR, vcpu->arch.tar);
vcpu             1001 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.shadow_fscr |= FSCR_TAR;
vcpu             1004 arch/powerpc/kvm/book3s_pr.c 		kvmppc_emulate_fac(vcpu, fac);
vcpu             1016 arch/powerpc/kvm/book3s_pr.c 	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
vcpu             1023 arch/powerpc/kvm/book3s_pr.c void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
vcpu             1025 arch/powerpc/kvm/book3s_pr.c 	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
vcpu             1027 arch/powerpc/kvm/book3s_pr.c 		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu             1028 arch/powerpc/kvm/book3s_pr.c 	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
vcpu             1029 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.fscr = fscr;
vcpu             1030 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
vcpu             1034 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.fscr = fscr;
vcpu             1038 arch/powerpc/kvm/book3s_pr.c static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
vcpu             1040 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
vcpu             1041 arch/powerpc/kvm/book3s_pr.c 		u64 msr = kvmppc_get_msr(vcpu);
vcpu             1043 arch/powerpc/kvm/book3s_pr.c 		kvmppc_set_msr(vcpu, msr | MSR_SE);
vcpu             1047 arch/powerpc/kvm/book3s_pr.c static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
vcpu             1049 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
vcpu             1050 arch/powerpc/kvm/book3s_pr.c 		u64 msr = kvmppc_get_msr(vcpu);
vcpu             1052 arch/powerpc/kvm/book3s_pr.c 		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
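kvmppc_setup_debug()/kvmppc_clear_debug() above force MSR_SE on for the guest run when userspace requested KVM_GUESTDBG_SINGLESTEP and strip it again on exit. A tiny sketch of that toggle, assuming only that MSR_SE is the single-step enable bit:

#include <stdbool.h>
#include <stdint.h>

#define MSR_SE	0x0000000000000400ULL	/* single-step enable */

/* Force single-stepping on for the guest run and strip it again afterwards
 * so the guest never observes the bit userspace asked for. */
static uint64_t debug_setup(uint64_t guest_msr, bool singlestep)
{
	return singlestep ? (guest_msr | MSR_SE) : guest_msr;
}

static uint64_t debug_clear(uint64_t guest_msr, bool singlestep)
{
	return singlestep ? (guest_msr & ~MSR_SE) : guest_msr;
}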
vcpu             1056 arch/powerpc/kvm/book3s_pr.c static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1071 arch/powerpc/kvm/book3s_pr.c 		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
vcpu             1075 arch/powerpc/kvm/book3s_pr.c 	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
vcpu             1079 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
vcpu             1082 arch/powerpc/kvm/book3s_pr.c 			kvmppc_get_pc(vcpu), last_inst);
vcpu             1085 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_program(vcpu, flags);
vcpu             1090 arch/powerpc/kvm/book3s_pr.c 	vcpu->stat.emulated_inst_exits++;
vcpu             1091 arch/powerpc/kvm/book3s_pr.c 	er = kvmppc_emulate_instruction(run, vcpu);
vcpu             1101 arch/powerpc/kvm/book3s_pr.c 			__func__, kvmppc_get_pc(vcpu), last_inst);
vcpu             1102 arch/powerpc/kvm/book3s_pr.c 		kvmppc_core_queue_program(vcpu, flags);
vcpu             1119 arch/powerpc/kvm/book3s_pr.c int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1125 arch/powerpc/kvm/book3s_pr.c 	vcpu->stat.sum_exits++;
vcpu             1132 arch/powerpc/kvm/book3s_pr.c 	trace_kvm_exit(exit_nr, vcpu);
vcpu             1138 arch/powerpc/kvm/book3s_pr.c 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
vcpu             1139 arch/powerpc/kvm/book3s_pr.c 		vcpu->stat.pf_instruc++;
vcpu             1141 arch/powerpc/kvm/book3s_pr.c 		if (kvmppc_is_split_real(vcpu))
vcpu             1142 arch/powerpc/kvm/book3s_pr.c 			kvmppc_fixup_split_real(vcpu);
vcpu             1151 arch/powerpc/kvm/book3s_pr.c 			svcpu = svcpu_get(vcpu);
vcpu             1152 arch/powerpc/kvm/book3s_pr.c 			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
vcpu             1155 arch/powerpc/kvm/book3s_pr.c 				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
vcpu             1164 arch/powerpc/kvm/book3s_pr.c 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1165 arch/powerpc/kvm/book3s_pr.c 			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
vcpu             1166 arch/powerpc/kvm/book3s_pr.c 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1167 arch/powerpc/kvm/book3s_pr.c 			vcpu->stat.sp_instruc++;
vcpu             1168 arch/powerpc/kvm/book3s_pr.c 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
vcpu             1169 arch/powerpc/kvm/book3s_pr.c 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
vcpu             1175 arch/powerpc/kvm/book3s_pr.c 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
vcpu             1178 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_inst_storage(vcpu,
vcpu             1186 arch/powerpc/kvm/book3s_pr.c 		ulong dar = kvmppc_get_fault_dar(vcpu);
vcpu             1187 arch/powerpc/kvm/book3s_pr.c 		u32 fault_dsisr = vcpu->arch.fault_dsisr;
vcpu             1188 arch/powerpc/kvm/book3s_pr.c 		vcpu->stat.pf_storage++;
vcpu             1197 arch/powerpc/kvm/book3s_pr.c 			svcpu = svcpu_get(vcpu);
vcpu             1201 arch/powerpc/kvm/book3s_pr.c 				kvmppc_mmu_map_segment(vcpu, dar);
vcpu             1214 arch/powerpc/kvm/book3s_pr.c 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1215 arch/powerpc/kvm/book3s_pr.c 			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
vcpu             1216 arch/powerpc/kvm/book3s_pr.c 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1218 arch/powerpc/kvm/book3s_pr.c 			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
vcpu             1224 arch/powerpc/kvm/book3s_pr.c 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
vcpu             1225 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
vcpu             1226 arch/powerpc/kvm/book3s_pr.c 			kvmppc_book3s_queue_irqprio(vcpu,
vcpu             1232 arch/powerpc/kvm/book3s_pr.c 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
vcpu             1233 arch/powerpc/kvm/book3s_pr.c 			kvmppc_book3s_queue_irqprio(vcpu,
vcpu             1243 arch/powerpc/kvm/book3s_pr.c 		vcpu->stat.dec_exits++;
vcpu             1249 arch/powerpc/kvm/book3s_pr.c 		vcpu->stat.ext_intr_exits++;
vcpu             1259 arch/powerpc/kvm/book3s_pr.c 		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
vcpu             1267 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->arch.papr_enabled) {
vcpu             1269 arch/powerpc/kvm/book3s_pr.c 			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
vcpu             1271 arch/powerpc/kvm/book3s_pr.c 				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
vcpu             1277 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->arch.papr_enabled &&
vcpu             1279 arch/powerpc/kvm/book3s_pr.c 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu             1281 arch/powerpc/kvm/book3s_pr.c 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
vcpu             1285 arch/powerpc/kvm/book3s_pr.c 			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
vcpu             1293 arch/powerpc/kvm/book3s_pr.c 				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
vcpu             1297 arch/powerpc/kvm/book3s_pr.c 			vcpu->arch.hcall_needed = 1;
vcpu             1299 arch/powerpc/kvm/book3s_pr.c 		} else if (vcpu->arch.osi_enabled &&
vcpu             1300 arch/powerpc/kvm/book3s_pr.c 		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
vcpu             1301 arch/powerpc/kvm/book3s_pr.c 		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
vcpu             1308 arch/powerpc/kvm/book3s_pr.c 				gprs[i] = kvmppc_get_gpr(vcpu, i);
vcpu             1309 arch/powerpc/kvm/book3s_pr.c 			vcpu->arch.osi_needed = 1;
vcpu             1311 arch/powerpc/kvm/book3s_pr.c 		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
vcpu             1312 arch/powerpc/kvm/book3s_pr.c 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
vcpu             1314 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
vcpu             1318 arch/powerpc/kvm/book3s_pr.c 			vcpu->stat.syscall_exits++;
vcpu             1319 arch/powerpc/kvm/book3s_pr.c 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
vcpu             1332 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
vcpu             1334 arch/powerpc/kvm/book3s_pr.c 			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
vcpu             1337 arch/powerpc/kvm/book3s_pr.c 				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
vcpu             1359 arch/powerpc/kvm/book3s_pr.c 		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
vcpu             1365 arch/powerpc/kvm/book3s_pr.c 		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
vcpu             1371 arch/powerpc/kvm/book3s_pr.c 			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
vcpu             1372 arch/powerpc/kvm/book3s_pr.c 			dar = kvmppc_alignment_dar(vcpu, last_inst);
vcpu             1374 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_dsisr(vcpu, dsisr);
vcpu             1375 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_dar(vcpu, dar);
vcpu             1377 arch/powerpc/kvm/book3s_pr.c 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
vcpu             1384 arch/powerpc/kvm/book3s_pr.c 		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
vcpu             1388 arch/powerpc/kvm/book3s_pr.c 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
vcpu             1392 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
vcpu             1396 arch/powerpc/kvm/book3s_pr.c 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
vcpu             1402 arch/powerpc/kvm/book3s_pr.c 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
vcpu             1405 arch/powerpc/kvm/book3s_pr.c 			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
vcpu             1423 arch/powerpc/kvm/book3s_pr.c 		s = kvmppc_prepare_to_enter(vcpu);
vcpu             1431 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_lost_ext(vcpu);
vcpu             1434 arch/powerpc/kvm/book3s_pr.c 	trace_kvm_book3s_reenter(r, vcpu);
vcpu             1439 arch/powerpc/kvm/book3s_pr.c static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
vcpu             1442 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu             1445 arch/powerpc/kvm/book3s_pr.c 	sregs->pvr = vcpu->arch.pvr;
vcpu             1447 arch/powerpc/kvm/book3s_pr.c 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
vcpu             1448 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
vcpu             1450 arch/powerpc/kvm/book3s_pr.c 			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
vcpu             1451 arch/powerpc/kvm/book3s_pr.c 			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
vcpu             1455 arch/powerpc/kvm/book3s_pr.c 			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
vcpu             1466 arch/powerpc/kvm/book3s_pr.c static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
vcpu             1469 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
vcpu             1472 arch/powerpc/kvm/book3s_pr.c 	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
vcpu             1476 arch/powerpc/kvm/book3s_pr.c 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
vcpu             1478 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
vcpu             1479 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.mmu.slbia(vcpu);
vcpu             1486 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
vcpu             1492 arch/powerpc/kvm/book3s_pr.c 			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
vcpu             1495 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
vcpu             1497 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
vcpu             1499 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
vcpu             1501 arch/powerpc/kvm/book3s_pr.c 			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
vcpu             1507 arch/powerpc/kvm/book3s_pr.c 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
vcpu             1512 arch/powerpc/kvm/book3s_pr.c static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
vcpu             1522 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
vcpu             1525 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
vcpu             1532 arch/powerpc/kvm/book3s_pr.c 		if (vcpu->arch.intr_msr & MSR_LE)
vcpu             1539 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.tfhar);
vcpu             1542 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.tfiar);
vcpu             1545 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.texasr);
vcpu             1549 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
vcpu             1558 arch/powerpc/kvm/book3s_pr.c 				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
vcpu             1561 arch/powerpc/kvm/book3s_pr.c 				val->vval = vcpu->arch.vr_tm.vr[i-32];
vcpu             1568 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.cr_tm);
vcpu             1571 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.xer_tm);
vcpu             1574 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.lr_tm);
vcpu             1577 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.ctr_tm);
vcpu             1580 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
vcpu             1583 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.amr_tm);
vcpu             1586 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.ppr_tm);
vcpu             1589 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
vcpu             1593 arch/powerpc/kvm/book3s_pr.c 			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
vcpu             1598 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.dscr_tm);
vcpu             1601 arch/powerpc/kvm/book3s_pr.c 		*val = get_reg_val(id, vcpu->arch.tar_tm);
vcpu             1612 arch/powerpc/kvm/book3s_pr.c static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
vcpu             1615 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.intr_msr |= MSR_LE;
vcpu             1617 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.intr_msr &= ~MSR_LE;
vcpu             1620 arch/powerpc/kvm/book3s_pr.c static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
vcpu             1627 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
vcpu             1628 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->hior_explicit = true;
vcpu             1631 arch/powerpc/kvm/book3s_pr.c 		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
vcpu             1635 arch/powerpc/kvm/book3s_pr.c 		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
vcpu             1639 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.tfhar = set_reg_val(id, *val);
vcpu             1642 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.tfiar = set_reg_val(id, *val);
vcpu             1645 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.texasr = set_reg_val(id, *val);
vcpu             1648 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
vcpu             1658 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
vcpu             1661 arch/powerpc/kvm/book3s_pr.c 				vcpu->arch.vr_tm.vr[i-32] = val->vval;
vcpu             1667 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.cr_tm = set_reg_val(id, *val);
vcpu             1670 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.xer_tm = set_reg_val(id, *val);
vcpu             1673 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.lr_tm = set_reg_val(id, *val);
vcpu             1676 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.ctr_tm = set_reg_val(id, *val);
vcpu             1679 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
vcpu             1682 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.amr_tm = set_reg_val(id, *val);
vcpu             1685 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.ppr_tm = set_reg_val(id, *val);
vcpu             1688 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
vcpu             1692 arch/powerpc/kvm/book3s_pr.c 			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
vcpu             1697 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.dscr_tm = set_reg_val(id, *val);
vcpu             1700 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.tar_tm = set_reg_val(id, *val);
vcpu             1715 arch/powerpc/kvm/book3s_pr.c 	struct kvm_vcpu *vcpu;
vcpu             1719 arch/powerpc/kvm/book3s_pr.c 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu             1720 arch/powerpc/kvm/book3s_pr.c 	if (!vcpu)
vcpu             1726 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.book3s = vcpu_book3s;
vcpu             1729 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shadow_vcpu =
vcpu             1730 arch/powerpc/kvm/book3s_pr.c 		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
vcpu             1731 arch/powerpc/kvm/book3s_pr.c 	if (!vcpu->arch.shadow_vcpu)
vcpu             1735 arch/powerpc/kvm/book3s_pr.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu             1743 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shared = (void *)p;
vcpu             1747 arch/powerpc/kvm/book3s_pr.c         vcpu->arch.shared_big_endian = true;
vcpu             1749 arch/powerpc/kvm/book3s_pr.c         vcpu->arch.shared_big_endian = false;
vcpu             1757 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.pvr = 0x3C0301;
vcpu             1759 arch/powerpc/kvm/book3s_pr.c 		vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu             1760 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.intr_msr = MSR_SF;
vcpu             1763 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.pvr = 0x84202;
vcpu             1765 arch/powerpc/kvm/book3s_pr.c 	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
vcpu             1766 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.slb_nr = 64;
vcpu             1768 arch/powerpc/kvm/book3s_pr.c 	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
vcpu             1770 arch/powerpc/kvm/book3s_pr.c 	err = kvmppc_mmu_init(vcpu);
vcpu             1774 arch/powerpc/kvm/book3s_pr.c 	return vcpu;
vcpu             1777 arch/powerpc/kvm/book3s_pr.c 	free_page((unsigned long)vcpu->arch.shared);
vcpu             1779 arch/powerpc/kvm/book3s_pr.c 	kvm_vcpu_uninit(vcpu);
vcpu             1782 arch/powerpc/kvm/book3s_pr.c 	kfree(vcpu->arch.shadow_vcpu);
vcpu             1787 arch/powerpc/kvm/book3s_pr.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             1792 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
vcpu             1794 arch/powerpc/kvm/book3s_pr.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu             1796 arch/powerpc/kvm/book3s_pr.c 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
vcpu             1797 arch/powerpc/kvm/book3s_pr.c 	kvm_vcpu_uninit(vcpu);
vcpu             1799 arch/powerpc/kvm/book3s_pr.c 	kfree(vcpu->arch.shadow_vcpu);
vcpu             1802 arch/powerpc/kvm/book3s_pr.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             1805 arch/powerpc/kvm/book3s_pr.c static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu             1813 arch/powerpc/kvm/book3s_pr.c 	if (!vcpu->arch.sane) {
vcpu             1819 arch/powerpc/kvm/book3s_pr.c 	kvmppc_setup_debug(vcpu);
vcpu             1827 arch/powerpc/kvm/book3s_pr.c 	ret = kvmppc_prepare_to_enter(vcpu);
vcpu             1836 arch/powerpc/kvm/book3s_pr.c 	if (kvmppc_get_msr(vcpu) & MSR_FP)
vcpu             1837 arch/powerpc/kvm/book3s_pr.c 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
vcpu             1841 arch/powerpc/kvm/book3s_pr.c 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
vcpu             1843 arch/powerpc/kvm/book3s_pr.c 	kvmppc_clear_debug(vcpu);
vcpu             1849 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
vcpu             1852 arch/powerpc/kvm/book3s_pr.c 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu             1855 arch/powerpc/kvm/book3s_pr.c 	vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu             1867 arch/powerpc/kvm/book3s_pr.c 	struct kvm_vcpu *vcpu;
vcpu             1887 arch/powerpc/kvm/book3s_pr.c 		kvm_for_each_vcpu(n, vcpu, kvm)
vcpu             1888 arch/powerpc/kvm/book3s_pr.c 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
vcpu             1940 arch/powerpc/kvm/book3s_pr.c 	struct kvm_vcpu *vcpu;
vcpu             1963 arch/powerpc/kvm/book3s_pr.c 	vcpu = kvm_get_vcpu(kvm, 0);
vcpu             1964 arch/powerpc/kvm/book3s_pr.c 	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
vcpu               23 arch/powerpc/kvm/book3s_pr_papr.c static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
vcpu               25 arch/powerpc/kvm/book3s_pr_papr.c 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
vcpu               36 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
vcpu               38 arch/powerpc/kvm/book3s_pr_papr.c 	long flags = kvmppc_get_gpr(vcpu, 4);
vcpu               39 arch/powerpc/kvm/book3s_pr_papr.c 	long pte_index = kvmppc_get_gpr(vcpu, 5);
vcpu               47 arch/powerpc/kvm/book3s_pr_papr.c 	pteg_addr = get_pteg_addr(vcpu, pte_index);
vcpu               49 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
vcpu               70 arch/powerpc/kvm/book3s_pr_papr.c 	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
vcpu               71 arch/powerpc/kvm/book3s_pr_papr.c 	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
vcpu               76 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
vcpu               80 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu               81 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, ret);
vcpu               86 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
vcpu               88 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
vcpu               89 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
vcpu               90 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
vcpu               95 arch/powerpc/kvm/book3s_pr_papr.c 	pteg = get_pteg_addr(vcpu, pte_index);
vcpu               96 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
vcpu              114 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
vcpu              117 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 4, pte[0]);
vcpu              118 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 5, pte[1]);
vcpu              121 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu              122 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, ret);
vcpu              145 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
vcpu              151 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
vcpu              153 arch/powerpc/kvm/book3s_pr_papr.c 		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
vcpu              154 arch/powerpc/kvm/book3s_pr_papr.c 		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
vcpu              173 arch/powerpc/kvm/book3s_pr_papr.c 			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
vcpu              178 arch/powerpc/kvm/book3s_pr_papr.c 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
vcpu              202 arch/powerpc/kvm/book3s_pr_papr.c 			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
vcpu              206 arch/powerpc/kvm/book3s_pr_papr.c 		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
vcpu              208 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu              209 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, ret);
vcpu              214 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu              216 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
vcpu              217 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
vcpu              218 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
vcpu              223 arch/powerpc/kvm/book3s_pr_papr.c 	pteg = get_pteg_addr(vcpu, pte_index);
vcpu              224 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
vcpu              247 arch/powerpc/kvm/book3s_pr_papr.c 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
vcpu              256 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
vcpu              257 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, ret);
vcpu              262 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
vcpu              266 arch/powerpc/kvm/book3s_pr_papr.c 	rc = kvmppc_h_logical_ci_load(vcpu);
vcpu              269 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              273 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
vcpu              277 arch/powerpc/kvm/book3s_pr_papr.c 	rc = kvmppc_h_logical_ci_store(vcpu);
vcpu              280 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              285 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
vcpu              287 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
vcpu              288 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
vcpu              289 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
vcpu              292 arch/powerpc/kvm/book3s_pr_papr.c 	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
vcpu              295 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              299 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
vcpu              301 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
vcpu              302 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
vcpu              303 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
vcpu              304 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
vcpu              307 arch/powerpc/kvm/book3s_pr_papr.c 	rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
vcpu              311 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              315 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
vcpu              317 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
vcpu              318 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
vcpu              319 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
vcpu              320 arch/powerpc/kvm/book3s_pr_papr.c 	unsigned long npages = kvmppc_get_gpr(vcpu, 7);
vcpu              323 arch/powerpc/kvm/book3s_pr_papr.c 	rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
vcpu              326 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              331 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
vcpu              336 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
vcpu              341 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
vcpu              347 arch/powerpc/kvm/book3s_pr_papr.c static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
vcpu              349 arch/powerpc/kvm/book3s_pr_papr.c 	long rc = kvmppc_xics_hcall(vcpu, cmd);
vcpu              350 arch/powerpc/kvm/book3s_pr_papr.c 	kvmppc_set_gpr(vcpu, 3, rc);
vcpu              354 arch/powerpc/kvm/book3s_pr_papr.c int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
vcpu              359 arch/powerpc/kvm/book3s_pr_papr.c 	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
vcpu              364 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_enter(vcpu);
vcpu              366 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_remove(vcpu);
vcpu              368 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_protect(vcpu);
vcpu              370 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_bulk_remove(vcpu);
vcpu              372 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_put_tce(vcpu);
vcpu              374 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_put_tce_indirect(vcpu);
vcpu              376 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_stuff_tce(vcpu);
vcpu              378 arch/powerpc/kvm/book3s_pr_papr.c 		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
vcpu              379 arch/powerpc/kvm/book3s_pr_papr.c 		kvm_vcpu_block(vcpu);
vcpu              380 arch/powerpc/kvm/book3s_pr_papr.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              381 arch/powerpc/kvm/book3s_pr_papr.c 		vcpu->stat.halt_wakeup++;
vcpu              384 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_logical_ci_load(vcpu);
vcpu              386 arch/powerpc/kvm/book3s_pr_papr.c 		return kvmppc_h_pr_logical_ci_store(vcpu);
vcpu              393 arch/powerpc/kvm/book3s_pr_papr.c 		if (kvmppc_xics_enabled(vcpu))
vcpu              394 arch/powerpc/kvm/book3s_pr_papr.c 			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
vcpu              397 arch/powerpc/kvm/book3s_pr_papr.c 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
vcpu              399 arch/powerpc/kvm/book3s_pr_papr.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              400 arch/powerpc/kvm/book3s_pr_papr.c 		rc = kvmppc_rtas_hcall(vcpu);
vcpu              401 arch/powerpc/kvm/book3s_pr_papr.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              404 arch/powerpc/kvm/book3s_pr_papr.c 		kvmppc_set_gpr(vcpu, 3, 0);
vcpu               19 arch/powerpc/kvm/book3s_rtas.c static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
vcpu               34 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
vcpu               36 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
vcpu               43 arch/powerpc/kvm/book3s_rtas.c static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
vcpu               57 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
vcpu               59 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
vcpu               71 arch/powerpc/kvm/book3s_rtas.c static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
vcpu               84 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_int_off(vcpu->kvm, irq);
vcpu               86 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_int_off(vcpu->kvm, irq);
vcpu               93 arch/powerpc/kvm/book3s_rtas.c static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
vcpu              106 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_int_on(vcpu->kvm, irq);
vcpu              108 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_int_on(vcpu->kvm, irq);
vcpu              117 arch/powerpc/kvm/book3s_rtas.c 	void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
vcpu              218 arch/powerpc/kvm/book3s_rtas.c int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
vcpu              230 arch/powerpc/kvm/book3s_rtas.c 	args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
vcpu              232 arch/powerpc/kvm/book3s_rtas.c 	rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
vcpu              245 arch/powerpc/kvm/book3s_rtas.c 	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
vcpu              248 arch/powerpc/kvm/book3s_rtas.c 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
vcpu              250 arch/powerpc/kvm/book3s_rtas.c 			d->handler->handler(vcpu, &args);
vcpu              256 arch/powerpc/kvm/book3s_rtas.c 	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
vcpu              260 arch/powerpc/kvm/book3s_rtas.c 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
vcpu              309 arch/powerpc/kvm/book3s_xics.c 		kvmppc_book3s_queue_irqprio(icp->vcpu,
vcpu              312 arch/powerpc/kvm/book3s_xics.c 			kvmppc_fast_vcpu_kick(icp->vcpu);
vcpu              586 arch/powerpc/kvm/book3s_xics.c static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
vcpu              589 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              593 arch/powerpc/kvm/book3s_xics.c 	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
vcpu              614 arch/powerpc/kvm/book3s_xics.c 	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
vcpu              619 arch/powerpc/kvm/book3s_xics.c static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              623 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              630 arch/powerpc/kvm/book3s_xics.c 		 vcpu->vcpu_id, server, mfrr);
vcpu              632 arch/powerpc/kvm/book3s_xics.c 	icp = vcpu->arch.icp;
vcpu              635 arch/powerpc/kvm/book3s_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
vcpu              704 arch/powerpc/kvm/book3s_xics.c static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
vcpu              709 arch/powerpc/kvm/book3s_xics.c 	icp = vcpu->arch.icp;
vcpu              711 arch/powerpc/kvm/book3s_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
vcpu              716 arch/powerpc/kvm/book3s_xics.c 	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
vcpu              717 arch/powerpc/kvm/book3s_xics.c 	kvmppc_set_gpr(vcpu, 5, state.mfrr);
vcpu              721 arch/powerpc/kvm/book3s_xics.c static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
vcpu              724 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              725 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              728 arch/powerpc/kvm/book3s_xics.c 	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
vcpu              753 arch/powerpc/kvm/book3s_xics.c 	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
vcpu              777 arch/powerpc/kvm/book3s_xics.c static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
vcpu              779 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              780 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              812 arch/powerpc/kvm/book3s_xics.c 	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
vcpu              817 arch/powerpc/kvm/book3s_xics.c static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
vcpu              819 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              820 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              823 arch/powerpc/kvm/book3s_xics.c 	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
vcpu              845 arch/powerpc/kvm/book3s_xics.c 	return ics_eoi(vcpu, irq);
vcpu              848 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
vcpu              850 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              851 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu              866 arch/powerpc/kvm/book3s_xics.c 		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
vcpu              875 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
vcpu              877 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu              882 arch/powerpc/kvm/book3s_xics.c 	if (!xics || !vcpu->arch.icp)
vcpu              888 arch/powerpc/kvm/book3s_xics.c 		res = kvmppc_h_xirr(vcpu);
vcpu              889 arch/powerpc/kvm/book3s_xics.c 		kvmppc_set_gpr(vcpu, 4, res);
vcpu              890 arch/powerpc/kvm/book3s_xics.c 		kvmppc_set_gpr(vcpu, 5, get_tb());
vcpu              893 arch/powerpc/kvm/book3s_xics.c 		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
vcpu              898 arch/powerpc/kvm/book3s_xics.c 	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
vcpu              899 arch/powerpc/kvm/book3s_xics.c 		return kvmppc_xics_rm_complete(vcpu, req);
vcpu              903 arch/powerpc/kvm/book3s_xics.c 		res = kvmppc_h_xirr(vcpu);
vcpu              904 arch/powerpc/kvm/book3s_xics.c 		kvmppc_set_gpr(vcpu, 4, res);
vcpu              907 arch/powerpc/kvm/book3s_xics.c 		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
vcpu              910 arch/powerpc/kvm/book3s_xics.c 		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
vcpu              913 arch/powerpc/kvm/book3s_xics.c 		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
vcpu              914 arch/powerpc/kvm/book3s_xics.c 				  kvmppc_get_gpr(vcpu, 5));
vcpu              944 arch/powerpc/kvm/book3s_xics.c 	struct kvm_vcpu *vcpu;
vcpu              964 arch/powerpc/kvm/book3s_xics.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              965 arch/powerpc/kvm/book3s_xics.c 		struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu             1071 arch/powerpc/kvm/book3s_xics.c static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
vcpu             1075 arch/powerpc/kvm/book3s_xics.c 	if (!vcpu->kvm->arch.xics)
vcpu             1078 arch/powerpc/kvm/book3s_xics.c 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
vcpu             1085 arch/powerpc/kvm/book3s_xics.c 	icp->vcpu = vcpu;
vcpu             1089 arch/powerpc/kvm/book3s_xics.c 	vcpu->arch.icp = icp;
vcpu             1091 arch/powerpc/kvm/book3s_xics.c 	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
vcpu             1096 arch/powerpc/kvm/book3s_xics.c u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
vcpu             1098 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu             1110 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
vcpu             1112 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_icp *icp = vcpu->arch.icp;
vcpu             1113 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
vcpu             1155 arch/powerpc/kvm/book3s_xics.c 	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
vcpu             1408 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
vcpu             1416 arch/powerpc/kvm/book3s_xics.c 	if (xics->kvm != vcpu->kvm)
vcpu             1418 arch/powerpc/kvm/book3s_xics.c 	if (vcpu->arch.irq_type)
vcpu             1421 arch/powerpc/kvm/book3s_xics.c 	r = kvmppc_xics_create_icp(vcpu, xcpu);
vcpu             1423 arch/powerpc/kvm/book3s_xics.c 		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
vcpu             1428 arch/powerpc/kvm/book3s_xics.c void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
vcpu             1430 arch/powerpc/kvm/book3s_xics.c 	if (!vcpu->arch.icp)
vcpu             1432 arch/powerpc/kvm/book3s_xics.c 	kfree(vcpu->arch.icp);
vcpu             1433 arch/powerpc/kvm/book3s_xics.c 	vcpu->arch.icp = NULL;
vcpu             1434 arch/powerpc/kvm/book3s_xics.c 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
vcpu               67 arch/powerpc/kvm/book3s_xics.h 	struct kvm_vcpu *vcpu;
vcpu              118 arch/powerpc/kvm/book3s_xics.h 	struct kvm_vcpu *vcpu = NULL;
vcpu              121 arch/powerpc/kvm/book3s_xics.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              122 arch/powerpc/kvm/book3s_xics.h 		if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
vcpu              123 arch/powerpc/kvm/book3s_xics.h 			return vcpu->arch.icp;
vcpu              145 arch/powerpc/kvm/book3s_xics.h extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
vcpu              146 arch/powerpc/kvm/book3s_xics.h extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              148 arch/powerpc/kvm/book3s_xics.h extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
vcpu              149 arch/powerpc/kvm/book3s_xics.h extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
vcpu               65 arch/powerpc/kvm/book3s_xive.c void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
vcpu               75 arch/powerpc/kvm/book3s_xive.c 	if (!tima || !vcpu->arch.xive_cam_word)
vcpu               79 arch/powerpc/kvm/book3s_xive.c 	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
vcpu               80 arch/powerpc/kvm/book3s_xive.c 	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
vcpu               81 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_pushed = 1;
vcpu               91 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.irq_pending = 0;
vcpu               97 arch/powerpc/kvm/book3s_xive.c 	if (vcpu->arch.xive_esc_on) {
vcpu               98 arch/powerpc/kvm/book3s_xive.c 		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
vcpu              125 arch/powerpc/kvm/book3s_xive.c 			vcpu->arch.xive_esc_on = 0;
vcpu              151 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu = data;
vcpu              153 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.irq_pending = 1;
vcpu              155 arch/powerpc/kvm/book3s_xive.c 	if (vcpu->arch.ceded)
vcpu              156 arch/powerpc/kvm/book3s_xive.c 		kvmppc_fast_vcpu_kick(vcpu);
vcpu              167 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_esc_on = false;
vcpu              175 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
vcpu              178 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              197 arch/powerpc/kvm/book3s_xive.c 				 vcpu->kvm->arch.lpid, xc->server_num);
vcpu              200 arch/powerpc/kvm/book3s_xive.c 				 vcpu->kvm->arch.lpid, xc->server_num, prio);
vcpu              211 arch/powerpc/kvm/book3s_xive.c 			 IRQF_NO_THREAD, name, vcpu);
vcpu              232 arch/powerpc/kvm/book3s_xive.c 		vcpu->arch.xive_esc_raddr = xd->eoi_page;
vcpu              233 arch/powerpc/kvm/book3s_xive.c 		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
vcpu              245 arch/powerpc/kvm/book3s_xive.c static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
vcpu              247 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              284 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu;
vcpu              296 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              297 arch/powerpc/kvm/book3s_xive.c 		if (!vcpu->arch.xive_vcpu)
vcpu              299 arch/powerpc/kvm/book3s_xive.c 		rc = xive_provision_queue(vcpu, prio);
vcpu              301 arch/powerpc/kvm/book3s_xive.c 			kvmppc_xive_attach_escalation(vcpu, prio,
vcpu              315 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu;
vcpu              320 arch/powerpc/kvm/book3s_xive.c 	vcpu = kvmppc_xive_find_server(kvm, server);
vcpu              321 arch/powerpc/kvm/book3s_xive.c 	if (!vcpu) {
vcpu              325 arch/powerpc/kvm/book3s_xive.c 	xc = vcpu->arch.xive_vcpu;
vcpu              333 arch/powerpc/kvm/book3s_xive.c static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
vcpu              335 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              355 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu;
vcpu              359 arch/powerpc/kvm/book3s_xive.c 	vcpu = kvmppc_xive_find_server(kvm, *server);
vcpu              360 arch/powerpc/kvm/book3s_xive.c 	if (!vcpu) {
vcpu              368 arch/powerpc/kvm/book3s_xive.c 	rc = xive_try_pick_queue(vcpu, prio);
vcpu              375 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              376 arch/powerpc/kvm/book3s_xive.c 		if (!vcpu->arch.xive_vcpu)
vcpu              378 arch/powerpc/kvm/book3s_xive.c 		rc = xive_try_pick_queue(vcpu, prio);
vcpu              380 arch/powerpc/kvm/book3s_xive.c 			*server = vcpu->arch.xive_vcpu->server_num;
vcpu              824 arch/powerpc/kvm/book3s_xive.c u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
vcpu              826 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              837 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
vcpu              839 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              840 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
vcpu              861 arch/powerpc/kvm/book3s_xive.c 	if (WARN_ON(vcpu->arch.xive_pushed))
vcpu              865 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_saved_state.cppr = cppr;
vcpu             1074 arch/powerpc/kvm/book3s_xive.c void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
vcpu             1076 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1077 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1110 arch/powerpc/kvm/book3s_xive.c 	if (vcpu->arch.xive_esc_on) {
vcpu             1111 arch/powerpc/kvm/book3s_xive.c 		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
vcpu             1113 arch/powerpc/kvm/book3s_xive.c 		vcpu->arch.xive_esc_on = false;
vcpu             1121 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_esc_vaddr = 0;
vcpu             1122 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_esc_raddr = 0;
vcpu             1133 arch/powerpc/kvm/book3s_xive.c void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
vcpu             1146 arch/powerpc/kvm/book3s_xive.c 	if (!vcpu->arch.xive_esc_on)
vcpu             1150 arch/powerpc/kvm/book3s_xive.c void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
vcpu             1152 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1153 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
vcpu             1156 arch/powerpc/kvm/book3s_xive.c 	if (!kvmppc_xics_enabled(vcpu))
vcpu             1166 arch/powerpc/kvm/book3s_xive.c 	kvmppc_xive_disable_vcpu_interrupts(vcpu);
vcpu             1175 arch/powerpc/kvm/book3s_xive.c 				xive_cleanup_single_escalation(vcpu, xc,
vcpu             1177 arch/powerpc/kvm/book3s_xive.c 			free_irq(xc->esc_virq[i], vcpu);
vcpu             1187 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_cam_word = 0;
vcpu             1210 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
vcpu             1211 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_vcpu = NULL;
vcpu             1215 arch/powerpc/kvm/book3s_xive.c 			     struct kvm_vcpu *vcpu, u32 cpu)
vcpu             1228 arch/powerpc/kvm/book3s_xive.c 	if (xive->kvm != vcpu->kvm)
vcpu             1230 arch/powerpc/kvm/book3s_xive.c 	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
vcpu             1232 arch/powerpc/kvm/book3s_xive.c 	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
vcpu             1253 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_vcpu = xc;
vcpu             1255 arch/powerpc/kvm/book3s_xive.c 	xc->vcpu = vcpu;
vcpu             1266 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
vcpu             1267 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
vcpu             1308 arch/powerpc/kvm/book3s_xive.c 			r = xive_provision_queue(vcpu, i);
vcpu             1311 arch/powerpc/kvm/book3s_xive.c 					vcpu, i, xive->single_escalation);
vcpu             1326 arch/powerpc/kvm/book3s_xive.c 	r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
vcpu             1338 arch/powerpc/kvm/book3s_xive.c 		kvmppc_xive_cleanup_vcpu(vcpu);
vcpu             1342 arch/powerpc/kvm/book3s_xive.c 	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
vcpu             1438 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             1454 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
vcpu             1455 arch/powerpc/kvm/book3s_xive.c 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1619 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             1622 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1623 arch/powerpc/kvm/book3s_xive.c 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1909 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu;
vcpu             1928 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1937 arch/powerpc/kvm/book3s_xive.c 		mutex_lock(&vcpu->mutex);
vcpu             1938 arch/powerpc/kvm/book3s_xive.c 		kvmppc_xive_cleanup_vcpu(vcpu);
vcpu             1939 arch/powerpc/kvm/book3s_xive.c 		mutex_unlock(&vcpu->mutex);
vcpu             2044 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
vcpu             2046 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             2086 arch/powerpc/kvm/book3s_xive.c 	struct kvm_vcpu *vcpu;
vcpu             2104 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             2105 arch/powerpc/kvm/book3s_xive.c 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             2116 arch/powerpc/kvm/book3s_xive.c 		kvmppc_xive_debug_show_queues(m, vcpu);
vcpu              148 arch/powerpc/kvm/book3s_xive.h 	struct kvm_vcpu		*vcpu;
vcpu              195 arch/powerpc/kvm/book3s_xive.h 	struct kvm_vcpu *vcpu = NULL;
vcpu              198 arch/powerpc/kvm/book3s_xive.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              199 arch/powerpc/kvm/book3s_xive.h 		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
vcpu              200 arch/powerpc/kvm/book3s_xive.h 			return vcpu;
vcpu              225 arch/powerpc/kvm/book3s_xive.h 	struct kvm_vcpu *vcpu = NULL;
vcpu              228 arch/powerpc/kvm/book3s_xive.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              229 arch/powerpc/kvm/book3s_xive.h 		if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
vcpu              271 arch/powerpc/kvm/book3s_xive.h extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
vcpu              272 arch/powerpc/kvm/book3s_xive.h extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
vcpu              273 arch/powerpc/kvm/book3s_xive.h extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              275 arch/powerpc/kvm/book3s_xive.h extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
vcpu              276 arch/powerpc/kvm/book3s_xive.h extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
vcpu              278 arch/powerpc/kvm/book3s_xive.h extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
vcpu              279 arch/powerpc/kvm/book3s_xive.h extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
vcpu              280 arch/powerpc/kvm/book3s_xive.h extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              282 arch/powerpc/kvm/book3s_xive.h extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
vcpu              283 arch/powerpc/kvm/book3s_xive.h extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
vcpu              288 arch/powerpc/kvm/book3s_xive.h void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
vcpu              289 arch/powerpc/kvm/book3s_xive.h int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
vcpu              294 arch/powerpc/kvm/book3s_xive.h int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
vcpu              297 arch/powerpc/kvm/book3s_xive.h void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
vcpu               41 arch/powerpc/kvm/book3s_xive_native.c static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
vcpu               43 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu               71 arch/powerpc/kvm/book3s_xive_native.c void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
vcpu               73 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu               76 arch/powerpc/kvm/book3s_xive_native.c 	if (!kvmppc_xive_enabled(vcpu))
vcpu               86 arch/powerpc/kvm/book3s_xive_native.c 	kvmppc_xive_disable_vcpu_interrupts(vcpu);
vcpu               93 arch/powerpc/kvm/book3s_xive_native.c 				xive_cleanup_single_escalation(vcpu, xc,
vcpu               95 arch/powerpc/kvm/book3s_xive_native.c 			free_irq(xc->esc_virq[i], vcpu);
vcpu              106 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_cam_word = 0;
vcpu              110 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_native_cleanup_queue(vcpu, i);
vcpu              117 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
vcpu              118 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_vcpu = NULL;
vcpu              122 arch/powerpc/kvm/book3s_xive_native.c 				    struct kvm_vcpu *vcpu, u32 server_num)
vcpu              135 arch/powerpc/kvm/book3s_xive_native.c 	if (xive->kvm != vcpu->kvm)
vcpu              137 arch/powerpc/kvm/book3s_xive_native.c 	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
vcpu              139 arch/powerpc/kvm/book3s_xive_native.c 	if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
vcpu              159 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_vcpu = xc;
vcpu              161 arch/powerpc/kvm/book3s_xive_native.c 	xc->vcpu = vcpu;
vcpu              166 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
vcpu              185 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
vcpu              186 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
vcpu              192 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_native_cleanup_vcpu(vcpu);
vcpu              555 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu              580 arch/powerpc/kvm/book3s_xive_native.c 	vcpu = kvmppc_xive_find_server(kvm, server);
vcpu              581 arch/powerpc/kvm/book3s_xive_native.c 	if (!vcpu) {
vcpu              585 arch/powerpc/kvm/book3s_xive_native.c 	xc = vcpu->arch.xive_vcpu;
vcpu              641 arch/powerpc/kvm/book3s_xive_native.c 	page_size = kvm_host_page_size(vcpu, gfn);
vcpu              691 arch/powerpc/kvm/book3s_xive_native.c 	rc = kvmppc_xive_attach_escalation(vcpu, priority,
vcpu              695 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_native_cleanup_queue(vcpu, priority);
vcpu              703 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu              725 arch/powerpc/kvm/book3s_xive_native.c 	vcpu = kvmppc_xive_find_server(kvm, server);
vcpu              726 arch/powerpc/kvm/book3s_xive_native.c 	if (!vcpu) {
vcpu              730 arch/powerpc/kvm/book3s_xive_native.c 	xc = vcpu->arch.xive_vcpu;
vcpu              800 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu              807 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              808 arch/powerpc/kvm/book3s_xive_native.c 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              814 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_disable_vcpu_interrupts(vcpu);
vcpu              823 arch/powerpc/kvm/book3s_xive_native.c 				free_irq(xc->esc_virq[prio], vcpu);
vcpu              829 arch/powerpc/kvm/book3s_xive_native.c 			kvmppc_xive_native_cleanup_queue(vcpu, prio);
vcpu              883 arch/powerpc/kvm/book3s_xive_native.c static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
vcpu              885 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              899 arch/powerpc/kvm/book3s_xive_native.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              900 arch/powerpc/kvm/book3s_xive_native.c 		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
vcpu              901 arch/powerpc/kvm/book3s_xive_native.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu              909 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu              925 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              926 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_native_vcpu_eq_sync(vcpu);
vcpu             1007 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu             1035 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1044 arch/powerpc/kvm/book3s_xive_native.c 		mutex_lock(&vcpu->mutex);
vcpu             1045 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_native_cleanup_vcpu(vcpu);
vcpu             1046 arch/powerpc/kvm/book3s_xive_native.c 		mutex_unlock(&vcpu->mutex);
vcpu             1128 arch/powerpc/kvm/book3s_xive_native.c int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
vcpu             1130 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1134 arch/powerpc/kvm/book3s_xive_native.c 	if (!kvmppc_xive_enabled(vcpu))
vcpu             1141 arch/powerpc/kvm/book3s_xive_native.c 	val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;
vcpu             1156 arch/powerpc/kvm/book3s_xive_native.c 		 vcpu->arch.xive_saved_state.nsr,
vcpu             1157 arch/powerpc/kvm/book3s_xive_native.c 		 vcpu->arch.xive_saved_state.cppr,
vcpu             1158 arch/powerpc/kvm/book3s_xive_native.c 		 vcpu->arch.xive_saved_state.ipb,
vcpu             1159 arch/powerpc/kvm/book3s_xive_native.c 		 vcpu->arch.xive_saved_state.pipr,
vcpu             1160 arch/powerpc/kvm/book3s_xive_native.c 		 vcpu->arch.xive_saved_state.w01,
vcpu             1161 arch/powerpc/kvm/book3s_xive_native.c 		 (u32) vcpu->arch.xive_cam_word, opal_state);
vcpu             1166 arch/powerpc/kvm/book3s_xive_native.c int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
vcpu             1168 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1169 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
vcpu             1174 arch/powerpc/kvm/book3s_xive_native.c 	if (!kvmppc_xive_enabled(vcpu))
vcpu             1181 arch/powerpc/kvm/book3s_xive_native.c 	if (WARN_ON(vcpu->arch.xive_pushed))
vcpu             1188 arch/powerpc/kvm/book3s_xive_native.c 	vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];
vcpu             1207 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm_vcpu *vcpu;
vcpu             1215 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1216 arch/powerpc/kvm/book3s_xive_native.c 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu             1223 arch/powerpc/kvm/book3s_xive_native.c 			   vcpu->arch.xive_saved_state.nsr,
vcpu             1224 arch/powerpc/kvm/book3s_xive_native.c 			   vcpu->arch.xive_saved_state.cppr,
vcpu             1225 arch/powerpc/kvm/book3s_xive_native.c 			   vcpu->arch.xive_saved_state.ipb,
vcpu             1226 arch/powerpc/kvm/book3s_xive_native.c 			   vcpu->arch.xive_saved_state.pipr,
vcpu             1227 arch/powerpc/kvm/book3s_xive_native.c 			   vcpu->arch.xive_saved_state.w01,
vcpu             1228 arch/powerpc/kvm/book3s_xive_native.c 			   (u32) vcpu->arch.xive_cam_word);
vcpu             1230 arch/powerpc/kvm/book3s_xive_native.c 		kvmppc_xive_debug_show_queues(m, vcpu);
vcpu              269 arch/powerpc/kvm/book3s_xive_template.c X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
vcpu              271 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              315 arch/powerpc/kvm/book3s_xive_template.c 	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
vcpu              320 arch/powerpc/kvm/book3s_xive_template.c X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
vcpu              322 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              332 arch/powerpc/kvm/book3s_xive_template.c 		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
vcpu              333 arch/powerpc/kvm/book3s_xive_template.c 		if (!vcpu)
vcpu              335 arch/powerpc/kvm/book3s_xive_template.c 		xc = vcpu->arch.xive_vcpu;
vcpu              350 arch/powerpc/kvm/book3s_xive_template.c 	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
vcpu              439 arch/powerpc/kvm/book3s_xive_template.c X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
vcpu              441 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              442 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
vcpu              498 arch/powerpc/kvm/book3s_xive_template.c X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
vcpu              500 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
vcpu              503 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              603 arch/powerpc/kvm/book3s_xive_template.c X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
vcpu              606 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
vcpu              613 arch/powerpc/kvm/book3s_xive_template.c 	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
vcpu              614 arch/powerpc/kvm/book3s_xive_template.c 	if (!vcpu)
vcpu              616 arch/powerpc/kvm/book3s_xive_template.c 	xc = vcpu->arch.xive_vcpu;
vcpu               65 arch/powerpc/kvm/booke.c void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
vcpu               69 arch/powerpc/kvm/booke.c 	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.regs.nip,
vcpu               70 arch/powerpc/kvm/booke.c 			vcpu->arch.shared->msr);
vcpu               71 arch/powerpc/kvm/booke.c 	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.regs.link,
vcpu               72 arch/powerpc/kvm/booke.c 			vcpu->arch.regs.ctr);
vcpu               73 arch/powerpc/kvm/booke.c 	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
vcpu               74 arch/powerpc/kvm/booke.c 					    vcpu->arch.shared->srr1);
vcpu               76 arch/powerpc/kvm/booke.c 	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
vcpu               80 arch/powerpc/kvm/booke.c 		       kvmppc_get_gpr(vcpu, i),
vcpu               81 arch/powerpc/kvm/booke.c 		       kvmppc_get_gpr(vcpu, i+1),
vcpu               82 arch/powerpc/kvm/booke.c 		       kvmppc_get_gpr(vcpu, i+2),
vcpu               83 arch/powerpc/kvm/booke.c 		       kvmppc_get_gpr(vcpu, i+3));
vcpu               88 arch/powerpc/kvm/booke.c void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
vcpu               92 arch/powerpc/kvm/booke.c 	kvmppc_save_guest_spe(vcpu);
vcpu               94 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr &= ~MSR_SPE;
vcpu               98 arch/powerpc/kvm/booke.c static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
vcpu              102 arch/powerpc/kvm/booke.c 	kvmppc_load_guest_spe(vcpu);
vcpu              104 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr |= MSR_SPE;
vcpu              108 arch/powerpc/kvm/booke.c static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
vcpu              110 arch/powerpc/kvm/booke.c 	if (vcpu->arch.shared->msr & MSR_SPE) {
vcpu              111 arch/powerpc/kvm/booke.c 		if (!(vcpu->arch.shadow_msr & MSR_SPE))
vcpu              112 arch/powerpc/kvm/booke.c 			kvmppc_vcpu_enable_spe(vcpu);
vcpu              113 arch/powerpc/kvm/booke.c 	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
vcpu              114 arch/powerpc/kvm/booke.c 		kvmppc_vcpu_disable_spe(vcpu);
vcpu              118 arch/powerpc/kvm/booke.c static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
vcpu              132 arch/powerpc/kvm/booke.c static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
vcpu              137 arch/powerpc/kvm/booke.c 		load_fp_state(&vcpu->arch.fp);
vcpu              139 arch/powerpc/kvm/booke.c 		current->thread.fp_save_area = &vcpu->arch.fp;
vcpu              149 arch/powerpc/kvm/booke.c static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
vcpu              158 arch/powerpc/kvm/booke.c static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
vcpu              163 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr &= ~MSR_FP;
vcpu              164 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
vcpu              173 arch/powerpc/kvm/booke.c static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
vcpu              179 arch/powerpc/kvm/booke.c 			load_vr_state(&vcpu->arch.vr);
vcpu              181 arch/powerpc/kvm/booke.c 			current->thread.vr_save_area = &vcpu->arch.vr;
vcpu              192 arch/powerpc/kvm/booke.c static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
vcpu              203 arch/powerpc/kvm/booke.c static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
vcpu              207 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr &= ~MSR_DE;
vcpu              208 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
vcpu              212 arch/powerpc/kvm/booke.c 	if (vcpu->guest_debug) {
vcpu              218 arch/powerpc/kvm/booke.c 		vcpu->arch.shared->msr |= MSR_DE;
vcpu              220 arch/powerpc/kvm/booke.c 		vcpu->arch.shadow_msr |= MSR_DE;
vcpu              221 arch/powerpc/kvm/booke.c 		vcpu->arch.shared->msr &= ~MSR_DE;
vcpu              230 arch/powerpc/kvm/booke.c void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
vcpu              232 arch/powerpc/kvm/booke.c 	u32 old_msr = vcpu->arch.shared->msr;
vcpu              238 arch/powerpc/kvm/booke.c 	vcpu->arch.shared->msr = new_msr;
vcpu              240 arch/powerpc/kvm/booke.c 	kvmppc_mmu_msr_notify(vcpu, old_msr);
vcpu              241 arch/powerpc/kvm/booke.c 	kvmppc_vcpu_sync_spe(vcpu);
vcpu              242 arch/powerpc/kvm/booke.c 	kvmppc_vcpu_sync_fpu(vcpu);
vcpu              243 arch/powerpc/kvm/booke.c 	kvmppc_vcpu_sync_debug(vcpu);
vcpu              246 arch/powerpc/kvm/booke.c static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
vcpu              249 arch/powerpc/kvm/booke.c 	trace_kvm_booke_queue_irqprio(vcpu, priority);
vcpu              250 arch/powerpc/kvm/booke.c 	set_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              253 arch/powerpc/kvm/booke.c void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
vcpu              256 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_dear = dear_flags;
vcpu              257 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_esr = esr_flags;
vcpu              258 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
vcpu              261 arch/powerpc/kvm/booke.c void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
vcpu              264 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_dear = dear_flags;
vcpu              265 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_esr = esr_flags;
vcpu              266 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
vcpu              269 arch/powerpc/kvm/booke.c void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
vcpu              271 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
vcpu              274 arch/powerpc/kvm/booke.c void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
vcpu              276 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_esr = esr_flags;
vcpu              277 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
vcpu              280 arch/powerpc/kvm/booke.c static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
vcpu              283 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_dear = dear_flags;
vcpu              284 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_esr = esr_flags;
vcpu              285 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
vcpu              288 arch/powerpc/kvm/booke.c void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
vcpu              290 arch/powerpc/kvm/booke.c 	vcpu->arch.queued_esr = esr_flags;
vcpu              291 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
vcpu              294 arch/powerpc/kvm/booke.c void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
vcpu              296 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
vcpu              300 arch/powerpc/kvm/booke.c void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
vcpu              302 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
vcpu              306 arch/powerpc/kvm/booke.c void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
vcpu              308 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
vcpu              311 arch/powerpc/kvm/booke.c int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
vcpu              313 arch/powerpc/kvm/booke.c 	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
vcpu              316 arch/powerpc/kvm/booke.c void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
vcpu              318 arch/powerpc/kvm/booke.c 	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
vcpu              321 arch/powerpc/kvm/booke.c void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
vcpu              329 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, prio);
vcpu              332 arch/powerpc/kvm/booke.c void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
vcpu              334 arch/powerpc/kvm/booke.c 	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
vcpu              335 arch/powerpc/kvm/booke.c 	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
vcpu              338 arch/powerpc/kvm/booke.c static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
vcpu              340 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
vcpu              343 arch/powerpc/kvm/booke.c static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
vcpu              345 arch/powerpc/kvm/booke.c 	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
vcpu              348 arch/powerpc/kvm/booke.c void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
vcpu              350 arch/powerpc/kvm/booke.c 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
vcpu              353 arch/powerpc/kvm/booke.c void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
vcpu              355 arch/powerpc/kvm/booke.c 	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
vcpu              358 arch/powerpc/kvm/booke.c static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
vcpu              360 arch/powerpc/kvm/booke.c 	kvmppc_set_srr0(vcpu, srr0);
vcpu              361 arch/powerpc/kvm/booke.c 	kvmppc_set_srr1(vcpu, srr1);
vcpu              364 arch/powerpc/kvm/booke.c static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
vcpu              366 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr0 = srr0;
vcpu              367 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr1 = srr1;
vcpu              370 arch/powerpc/kvm/booke.c static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
vcpu              373 arch/powerpc/kvm/booke.c 		vcpu->arch.dsrr0 = srr0;
vcpu              374 arch/powerpc/kvm/booke.c 		vcpu->arch.dsrr1 = srr1;
vcpu              376 arch/powerpc/kvm/booke.c 		set_guest_csrr(vcpu, srr0, srr1);
vcpu              380 arch/powerpc/kvm/booke.c static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
vcpu              382 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr0 = srr0;
vcpu              383 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr1 = srr1;
vcpu              387 arch/powerpc/kvm/booke.c static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu              393 arch/powerpc/kvm/booke.c 	ulong crit_raw = vcpu->arch.shared->critical;
vcpu              394 arch/powerpc/kvm/booke.c 	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
vcpu              398 arch/powerpc/kvm/booke.c 	ulong new_msr = vcpu->arch.shared->msr;
vcpu              401 arch/powerpc/kvm/booke.c 	if (!(vcpu->arch.shared->msr & MSR_SF)) {
vcpu              409 arch/powerpc/kvm/booke.c 	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
vcpu              416 arch/powerpc/kvm/booke.c 	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
vcpu              449 arch/powerpc/kvm/booke.c 		allowed = vcpu->arch.shared->msr & MSR_CE;
vcpu              455 arch/powerpc/kvm/booke.c 		allowed = vcpu->arch.shared->msr & MSR_ME;
vcpu              465 arch/powerpc/kvm/booke.c 		allowed = vcpu->arch.shared->msr & MSR_EE;
vcpu              471 arch/powerpc/kvm/booke.c 		allowed = vcpu->arch.shared->msr & MSR_DE;
vcpu              485 arch/powerpc/kvm/booke.c 			set_guest_srr(vcpu, vcpu->arch.regs.nip,
vcpu              486 arch/powerpc/kvm/booke.c 				      vcpu->arch.shared->msr);
vcpu              489 arch/powerpc/kvm/booke.c 			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
vcpu              490 arch/powerpc/kvm/booke.c 				       vcpu->arch.shared->msr);
vcpu              493 arch/powerpc/kvm/booke.c 			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
vcpu              494 arch/powerpc/kvm/booke.c 				       vcpu->arch.shared->msr);
vcpu              497 arch/powerpc/kvm/booke.c 			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
vcpu              498 arch/powerpc/kvm/booke.c 					vcpu->arch.shared->msr);
vcpu              502 arch/powerpc/kvm/booke.c 		vcpu->arch.regs.nip = vcpu->arch.ivpr |
vcpu              503 arch/powerpc/kvm/booke.c 					vcpu->arch.ivor[priority];
vcpu              505 arch/powerpc/kvm/booke.c 			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
vcpu              507 arch/powerpc/kvm/booke.c 			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
vcpu              509 arch/powerpc/kvm/booke.c 			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
vcpu              510 arch/powerpc/kvm/booke.c 				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
vcpu              511 arch/powerpc/kvm/booke.c 			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
vcpu              512 arch/powerpc/kvm/booke.c 				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
vcpu              513 arch/powerpc/kvm/booke.c 				kvmppc_mpic_set_epr(vcpu);
vcpu              519 arch/powerpc/kvm/booke.c 		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
vcpu              522 arch/powerpc/kvm/booke.c 		kvmppc_set_msr(vcpu, new_msr);
vcpu              525 arch/powerpc/kvm/booke.c 			clear_bit(priority, &vcpu->arch.pending_exceptions);
vcpu              534 arch/powerpc/kvm/booke.c 	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
vcpu              535 arch/powerpc/kvm/booke.c 		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
vcpu              536 arch/powerpc/kvm/booke.c 	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
vcpu              537 arch/powerpc/kvm/booke.c 		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
vcpu              538 arch/powerpc/kvm/booke.c 	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
vcpu              539 arch/powerpc/kvm/booke.c 		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
vcpu              550 arch/powerpc/kvm/booke.c static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
vcpu              554 arch/powerpc/kvm/booke.c 	u32 period = TCR_GET_WP(vcpu->arch.tcr);
vcpu              576 arch/powerpc/kvm/booke.c static void arm_next_watchdog(struct kvm_vcpu *vcpu)
vcpu              585 arch/powerpc/kvm/booke.c 	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
vcpu              586 arch/powerpc/kvm/booke.c 		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
vcpu              588 arch/powerpc/kvm/booke.c 	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
vcpu              589 arch/powerpc/kvm/booke.c 	nr_jiffies = watchdog_next_timeout(vcpu);
vcpu              595 arch/powerpc/kvm/booke.c 		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
vcpu              597 arch/powerpc/kvm/booke.c 		del_timer(&vcpu->arch.wdt_timer);
vcpu              598 arch/powerpc/kvm/booke.c 	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
vcpu              603 arch/powerpc/kvm/booke.c 	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
vcpu              608 arch/powerpc/kvm/booke.c 		new_tsr = tsr = vcpu->arch.tsr;
vcpu              620 arch/powerpc/kvm/booke.c 	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
vcpu              624 arch/powerpc/kvm/booke.c 		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
vcpu              625 arch/powerpc/kvm/booke.c 		kvm_vcpu_kick(vcpu);
vcpu              632 arch/powerpc/kvm/booke.c 	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
vcpu              633 arch/powerpc/kvm/booke.c 	    vcpu->arch.watchdog_enabled) {
vcpu              635 arch/powerpc/kvm/booke.c 		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
vcpu              636 arch/powerpc/kvm/booke.c 		kvm_vcpu_kick(vcpu);
vcpu              646 arch/powerpc/kvm/booke.c 		arm_next_watchdog(vcpu);
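
kvmppc_watchdog_func() above walks a two-stage watchdog through TSR[ENW] and TSR[WIS] with a cmpxchg retry loop, and only the third consecutive expiry is treated as "final" (subject to TCR[WRC] and watchdog_enabled). A toy, single-threaded sketch of that progression; the bit positions are placeholders, not the architected TSR encoding.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TSR_ENW (1u << 31)	/* placeholder: "enabled next watchdog" stage */
#define TOY_TSR_WIS (1u << 30)	/* placeholder: "watchdog interrupt status"   */

static bool watchdog_expire(uint32_t *tsr)
{
	if (!(*tsr & TOY_TSR_ENW)) {
		*tsr |= TOY_TSR_ENW;	/* first expiry: arm the next stage        */
		return false;
	}
	if (!(*tsr & TOY_TSR_WIS)) {
		*tsr |= TOY_TSR_WIS;	/* second expiry: raise watchdog interrupt */
		return false;
	}
	return true;			/* third expiry: final action              */
}

int main(void)
{
	uint32_t tsr = 0;

	for (int i = 0; i < 3; i++) {
		bool final = watchdog_expire(&tsr);
		printf("expiry %d: final=%d tsr=0x%08" PRIx32 "\n", i, (int)final, tsr);
	}
	return 0;
}
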
vcpu              649 arch/powerpc/kvm/booke.c static void update_timer_ints(struct kvm_vcpu *vcpu)
vcpu              651 arch/powerpc/kvm/booke.c 	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
vcpu              652 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_dec(vcpu);
vcpu              654 arch/powerpc/kvm/booke.c 		kvmppc_core_dequeue_dec(vcpu);
vcpu              656 arch/powerpc/kvm/booke.c 	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
vcpu              657 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_watchdog(vcpu);
vcpu              659 arch/powerpc/kvm/booke.c 		kvmppc_core_dequeue_watchdog(vcpu);
vcpu              662 arch/powerpc/kvm/booke.c static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
vcpu              664 arch/powerpc/kvm/booke.c 	unsigned long *pending = &vcpu->arch.pending_exceptions;
vcpu              669 arch/powerpc/kvm/booke.c 		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
vcpu              678 arch/powerpc/kvm/booke.c 	vcpu->arch.shared->int_pending = !!*pending;
vcpu              682 arch/powerpc/kvm/booke.c int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
vcpu              687 arch/powerpc/kvm/booke.c 	kvmppc_core_check_exceptions(vcpu);
vcpu              689 arch/powerpc/kvm/booke.c 	if (kvm_request_pending(vcpu)) {
vcpu              694 arch/powerpc/kvm/booke.c 	if (vcpu->arch.shared->msr & MSR_WE) {
vcpu              696 arch/powerpc/kvm/booke.c 		kvm_vcpu_block(vcpu);
vcpu              697 arch/powerpc/kvm/booke.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              700 arch/powerpc/kvm/booke.c 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
vcpu              707 arch/powerpc/kvm/booke.c int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
vcpu              711 arch/powerpc/kvm/booke.c 	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
vcpu              712 arch/powerpc/kvm/booke.c 		update_timer_ints(vcpu);
vcpu              714 arch/powerpc/kvm/booke.c 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
vcpu              715 arch/powerpc/kvm/booke.c 		kvmppc_core_flush_tlb(vcpu);
vcpu              718 arch/powerpc/kvm/booke.c 	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
vcpu              719 arch/powerpc/kvm/booke.c 		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
vcpu              723 arch/powerpc/kvm/booke.c 	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
vcpu              724 arch/powerpc/kvm/booke.c 		vcpu->run->epr.epr = 0;
vcpu              725 arch/powerpc/kvm/booke.c 		vcpu->arch.epr_needed = true;
vcpu              726 arch/powerpc/kvm/booke.c 		vcpu->run->exit_reason = KVM_EXIT_EPR;
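
kvmppc_core_check_requests() above converts pending watchdog and external-proxy (EPR) requests into userspace exits. A rough sketch of the consuming side in a VMM, assuming the usual setup (vcpu_fd from KVM_CREATE_VCPU, run mmap'ed from that fd) already exists; error handling is omitted.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void run_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	switch (run->exit_reason) {
	case KVM_EXIT_WATCHDOG:
		/* Final watchdog expiry with TCR[WRC] armed: reset or stop the guest. */
		printf("guest watchdog final expiry\n");
		break;
	case KVM_EXIT_EPR:
		/* KVMPPC_EPR_USER case: userspace supplies the external proxy value. */
		run->epr.epr = 0;	/* hypothetical: nothing to report */
		break;
	default:
		break;
	}
}
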
vcpu              733 arch/powerpc/kvm/booke.c int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu              738 arch/powerpc/kvm/booke.c 	if (!vcpu->arch.sane) {
vcpu              743 arch/powerpc/kvm/booke.c 	s = kvmppc_prepare_to_enter(vcpu);
vcpu              758 arch/powerpc/kvm/booke.c 	kvmppc_load_guest_fp(vcpu);
vcpu              769 arch/powerpc/kvm/booke.c 	kvmppc_load_guest_altivec(vcpu);
vcpu              773 arch/powerpc/kvm/booke.c 	debug = vcpu->arch.dbg_reg;
vcpu              776 arch/powerpc/kvm/booke.c 	current->thread.debug = vcpu->arch.dbg_reg;
vcpu              778 arch/powerpc/kvm/booke.c 	vcpu->arch.pgdir = current->mm->pgd;
vcpu              781 arch/powerpc/kvm/booke.c 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
vcpu              791 arch/powerpc/kvm/booke.c 	kvmppc_save_guest_fp(vcpu);
vcpu              795 arch/powerpc/kvm/booke.c 	kvmppc_save_guest_altivec(vcpu);
vcpu              799 arch/powerpc/kvm/booke.c 	vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu              803 arch/powerpc/kvm/booke.c static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu              807 arch/powerpc/kvm/booke.c 	er = kvmppc_emulate_instruction(run, vcpu);
vcpu              811 arch/powerpc/kvm/booke.c 		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
vcpu              821 arch/powerpc/kvm/booke.c 		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
vcpu              825 arch/powerpc/kvm/booke.c 		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
vcpu              826 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_program(vcpu, ESR_PIL);
vcpu              837 arch/powerpc/kvm/booke.c static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu              839 arch/powerpc/kvm/booke.c 	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
vcpu              840 arch/powerpc/kvm/booke.c 	u32 dbsr = vcpu->arch.dbsr;
vcpu              842 arch/powerpc/kvm/booke.c 	if (vcpu->guest_debug == 0) {
vcpu              853 arch/powerpc/kvm/booke.c 		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
vcpu              854 arch/powerpc/kvm/booke.c 			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
vcpu              855 arch/powerpc/kvm/booke.c 			kvmppc_core_queue_debug(vcpu);
vcpu              858 arch/powerpc/kvm/booke.c 		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
vcpu              859 arch/powerpc/kvm/booke.c 			kvmppc_core_queue_program(vcpu, ESR_PTR);
vcpu              868 arch/powerpc/kvm/booke.c 	vcpu->arch.dbsr = 0;
vcpu              870 arch/powerpc/kvm/booke.c 	run->debug.arch.address = vcpu->arch.regs.nip;
vcpu              910 arch/powerpc/kvm/booke.c static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
vcpu              951 arch/powerpc/kvm/booke.c 		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
vcpu              957 arch/powerpc/kvm/booke.c static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              966 arch/powerpc/kvm/booke.c 		       __func__, vcpu->arch.regs.nip);
vcpu              971 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_program(vcpu, ESR_PIL);
vcpu              984 arch/powerpc/kvm/booke.c int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              994 arch/powerpc/kvm/booke.c 	kvmppc_update_timing_stats(vcpu);
vcpu              997 arch/powerpc/kvm/booke.c 	kvmppc_restart_interrupt(vcpu, exit_nr);
vcpu             1007 arch/powerpc/kvm/booke.c 		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
vcpu             1011 arch/powerpc/kvm/booke.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
vcpu             1012 arch/powerpc/kvm/booke.c 			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
vcpu             1018 arch/powerpc/kvm/booke.c 	trace_kvm_exit(exit_nr, vcpu);
vcpu             1027 arch/powerpc/kvm/booke.c 		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
vcpu             1034 arch/powerpc/kvm/booke.c 		kvmppc_dump_vcpu(vcpu);
vcpu             1042 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
vcpu             1047 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, DEC_EXITS);
vcpu             1056 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, DBELL_EXITS);
vcpu             1061 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, GDBELL_EXITS);
vcpu             1072 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, GDBELL_EXITS);
vcpu             1087 arch/powerpc/kvm/booke.c 		r = emulation_exit(run, vcpu);
vcpu             1091 arch/powerpc/kvm/booke.c 		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
vcpu             1097 arch/powerpc/kvm/booke.c 			r = kvmppc_handle_debug(run, vcpu);
vcpu             1099 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, DEBUG_EXITS);
vcpu             1103 arch/powerpc/kvm/booke.c 		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
vcpu             1112 arch/powerpc/kvm/booke.c 			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
vcpu             1114 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, USR_PR_INST);
vcpu             1118 arch/powerpc/kvm/booke.c 		r = emulation_exit(run, vcpu);
vcpu             1122 arch/powerpc/kvm/booke.c 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
vcpu             1123 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, FP_UNAVAIL);
vcpu             1129 arch/powerpc/kvm/booke.c 		if (vcpu->arch.shared->msr & MSR_SPE)
vcpu             1130 arch/powerpc/kvm/booke.c 			kvmppc_vcpu_enable_spe(vcpu);
vcpu             1132 arch/powerpc/kvm/booke.c 			kvmppc_booke_queue_irqprio(vcpu,
vcpu             1139 arch/powerpc/kvm/booke.c 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
vcpu             1144 arch/powerpc/kvm/booke.c 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
vcpu             1153 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
vcpu             1164 arch/powerpc/kvm/booke.c 		       __func__, exit_nr, vcpu->arch.regs.nip);
vcpu             1176 arch/powerpc/kvm/booke.c 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
vcpu             1181 arch/powerpc/kvm/booke.c 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
vcpu             1187 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
vcpu             1188 arch/powerpc/kvm/booke.c 		                               vcpu->arch.fault_esr);
vcpu             1189 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, DSI_EXITS);
vcpu             1194 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
vcpu             1195 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, ISI_EXITS);
vcpu             1200 arch/powerpc/kvm/booke.c 		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
vcpu             1201 arch/powerpc/kvm/booke.c 		                            vcpu->arch.fault_esr);
vcpu             1207 arch/powerpc/kvm/booke.c 		if (!(vcpu->arch.shared->msr & MSR_PR)) {
vcpu             1208 arch/powerpc/kvm/booke.c 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
vcpu             1214 arch/powerpc/kvm/booke.c 			kvmppc_core_queue_program(vcpu, ESR_PPR);
vcpu             1221 arch/powerpc/kvm/booke.c 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
vcpu             1222 arch/powerpc/kvm/booke.c 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
vcpu             1224 arch/powerpc/kvm/booke.c 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
vcpu             1228 arch/powerpc/kvm/booke.c 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
vcpu             1230 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
vcpu             1236 arch/powerpc/kvm/booke.c 		unsigned long eaddr = vcpu->arch.fault_dear;
vcpu             1242 arch/powerpc/kvm/booke.c 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
vcpu             1243 arch/powerpc/kvm/booke.c 		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
vcpu             1244 arch/powerpc/kvm/booke.c 			kvmppc_map_magic(vcpu);
vcpu             1245 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
vcpu             1253 arch/powerpc/kvm/booke.c 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
vcpu             1256 arch/powerpc/kvm/booke.c 			kvmppc_core_queue_dtlb_miss(vcpu,
vcpu             1257 arch/powerpc/kvm/booke.c 			                            vcpu->arch.fault_dear,
vcpu             1258 arch/powerpc/kvm/booke.c 			                            vcpu->arch.fault_esr);
vcpu             1259 arch/powerpc/kvm/booke.c 			kvmppc_mmu_dtlb_miss(vcpu);
vcpu             1260 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
vcpu             1265 arch/powerpc/kvm/booke.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1267 arch/powerpc/kvm/booke.c 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
vcpu             1270 arch/powerpc/kvm/booke.c 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
vcpu             1277 arch/powerpc/kvm/booke.c 			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
vcpu             1278 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
vcpu             1283 arch/powerpc/kvm/booke.c 			vcpu->arch.paddr_accessed = gpaddr;
vcpu             1284 arch/powerpc/kvm/booke.c 			vcpu->arch.vaddr_accessed = eaddr;
vcpu             1285 arch/powerpc/kvm/booke.c 			r = kvmppc_emulate_mmio(run, vcpu);
vcpu             1286 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, MMIO_EXITS);
vcpu             1289 arch/powerpc/kvm/booke.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1294 arch/powerpc/kvm/booke.c 		unsigned long eaddr = vcpu->arch.regs.nip;
vcpu             1302 arch/powerpc/kvm/booke.c 		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
vcpu             1305 arch/powerpc/kvm/booke.c 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
vcpu             1306 arch/powerpc/kvm/booke.c 			kvmppc_mmu_itlb_miss(vcpu);
vcpu             1307 arch/powerpc/kvm/booke.c 			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
vcpu             1311 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
vcpu             1313 arch/powerpc/kvm/booke.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1315 arch/powerpc/kvm/booke.c 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
vcpu             1318 arch/powerpc/kvm/booke.c 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
vcpu             1325 arch/powerpc/kvm/booke.c 			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
vcpu             1328 arch/powerpc/kvm/booke.c 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
vcpu             1331 arch/powerpc/kvm/booke.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1336 arch/powerpc/kvm/booke.c 		r = kvmppc_handle_debug(run, vcpu);
vcpu             1339 arch/powerpc/kvm/booke.c 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
vcpu             1354 arch/powerpc/kvm/booke.c 		s = kvmppc_prepare_to_enter(vcpu);
vcpu             1360 arch/powerpc/kvm/booke.c 			kvmppc_load_guest_fp(vcpu);
vcpu             1361 arch/powerpc/kvm/booke.c 			kvmppc_load_guest_altivec(vcpu);
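
Both TLB-miss cases above translate the faulting address, derive a guest frame number, and ask whether a memslot backs it; if not, the access is recorded in paddr_accessed/vaddr_accessed and punted to MMIO emulation. A toy version of the "RAM or MMIO?" decision, with an invented memslot array standing in for kvm_is_visible_gfn().

#include <stdbool.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12

struct toy_memslot { uint64_t base_gfn, npages; };

/* Return true if the guest physical address is backed by RAM, false for MMIO. */
static bool toy_gfn_is_ram(const struct toy_memslot *slots, int n, uint64_t gpaddr)
{
	uint64_t gfn = gpaddr >> TOY_PAGE_SHIFT;

	for (int i = 0; i < n; i++)
		if (gfn >= slots[i].base_gfn && gfn < slots[i].base_gfn + slots[i].npages)
			return true;
	return false;
}
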
vcpu             1368 arch/powerpc/kvm/booke.c static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
vcpu             1370 arch/powerpc/kvm/booke.c 	u32 old_tsr = vcpu->arch.tsr;
vcpu             1372 arch/powerpc/kvm/booke.c 	vcpu->arch.tsr = new_tsr;
vcpu             1374 arch/powerpc/kvm/booke.c 	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
vcpu             1375 arch/powerpc/kvm/booke.c 		arm_next_watchdog(vcpu);
vcpu             1377 arch/powerpc/kvm/booke.c 	update_timer_ints(vcpu);
vcpu             1381 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu             1386 arch/powerpc/kvm/booke.c 	vcpu->arch.regs.nip = 0;
vcpu             1387 arch/powerpc/kvm/booke.c 	vcpu->arch.shared->pir = vcpu->vcpu_id;
vcpu             1388 arch/powerpc/kvm/booke.c 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu             1389 arch/powerpc/kvm/booke.c 	kvmppc_set_msr(vcpu, 0);
vcpu             1392 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
vcpu             1393 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_pid = 1;
vcpu             1394 arch/powerpc/kvm/booke.c 	vcpu->arch.shared->msr = 0;
vcpu             1399 arch/powerpc/kvm/booke.c 	vcpu->arch.ivpr = 0x55550000;
vcpu             1401 arch/powerpc/kvm/booke.c 		vcpu->arch.ivor[i] = 0x7700 | i * 4;
vcpu             1403 arch/powerpc/kvm/booke.c 	kvmppc_init_timing_stats(vcpu);
vcpu             1405 arch/powerpc/kvm/booke.c 	r = kvmppc_core_vcpu_setup(vcpu);
vcpu             1406 arch/powerpc/kvm/booke.c 	kvmppc_sanity_check(vcpu);
vcpu             1410 arch/powerpc/kvm/booke.c int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             1413 arch/powerpc/kvm/booke.c 	spin_lock_init(&vcpu->arch.wdt_lock);
vcpu             1414 arch/powerpc/kvm/booke.c 	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
vcpu             1424 arch/powerpc/kvm/booke.c void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu             1426 arch/powerpc/kvm/booke.c 	del_timer_sync(&vcpu->arch.wdt_timer);
vcpu             1429 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             1433 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             1435 arch/powerpc/kvm/booke.c 	regs->pc = vcpu->arch.regs.nip;
vcpu             1436 arch/powerpc/kvm/booke.c 	regs->cr = kvmppc_get_cr(vcpu);
vcpu             1437 arch/powerpc/kvm/booke.c 	regs->ctr = vcpu->arch.regs.ctr;
vcpu             1438 arch/powerpc/kvm/booke.c 	regs->lr = vcpu->arch.regs.link;
vcpu             1439 arch/powerpc/kvm/booke.c 	regs->xer = kvmppc_get_xer(vcpu);
vcpu             1440 arch/powerpc/kvm/booke.c 	regs->msr = vcpu->arch.shared->msr;
vcpu             1441 arch/powerpc/kvm/booke.c 	regs->srr0 = kvmppc_get_srr0(vcpu);
vcpu             1442 arch/powerpc/kvm/booke.c 	regs->srr1 = kvmppc_get_srr1(vcpu);
vcpu             1443 arch/powerpc/kvm/booke.c 	regs->pid = vcpu->arch.pid;
vcpu             1444 arch/powerpc/kvm/booke.c 	regs->sprg0 = kvmppc_get_sprg0(vcpu);
vcpu             1445 arch/powerpc/kvm/booke.c 	regs->sprg1 = kvmppc_get_sprg1(vcpu);
vcpu             1446 arch/powerpc/kvm/booke.c 	regs->sprg2 = kvmppc_get_sprg2(vcpu);
vcpu             1447 arch/powerpc/kvm/booke.c 	regs->sprg3 = kvmppc_get_sprg3(vcpu);
vcpu             1448 arch/powerpc/kvm/booke.c 	regs->sprg4 = kvmppc_get_sprg4(vcpu);
vcpu             1449 arch/powerpc/kvm/booke.c 	regs->sprg5 = kvmppc_get_sprg5(vcpu);
vcpu             1450 arch/powerpc/kvm/booke.c 	regs->sprg6 = kvmppc_get_sprg6(vcpu);
vcpu             1451 arch/powerpc/kvm/booke.c 	regs->sprg7 = kvmppc_get_sprg7(vcpu);
vcpu             1454 arch/powerpc/kvm/booke.c 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
vcpu             1456 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
vcpu             1460 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             1464 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             1466 arch/powerpc/kvm/booke.c 	vcpu->arch.regs.nip = regs->pc;
vcpu             1467 arch/powerpc/kvm/booke.c 	kvmppc_set_cr(vcpu, regs->cr);
vcpu             1468 arch/powerpc/kvm/booke.c 	vcpu->arch.regs.ctr = regs->ctr;
vcpu             1469 arch/powerpc/kvm/booke.c 	vcpu->arch.regs.link = regs->lr;
vcpu             1470 arch/powerpc/kvm/booke.c 	kvmppc_set_xer(vcpu, regs->xer);
vcpu             1471 arch/powerpc/kvm/booke.c 	kvmppc_set_msr(vcpu, regs->msr);
vcpu             1472 arch/powerpc/kvm/booke.c 	kvmppc_set_srr0(vcpu, regs->srr0);
vcpu             1473 arch/powerpc/kvm/booke.c 	kvmppc_set_srr1(vcpu, regs->srr1);
vcpu             1474 arch/powerpc/kvm/booke.c 	kvmppc_set_pid(vcpu, regs->pid);
vcpu             1475 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg0(vcpu, regs->sprg0);
vcpu             1476 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg1(vcpu, regs->sprg1);
vcpu             1477 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg2(vcpu, regs->sprg2);
vcpu             1478 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg3(vcpu, regs->sprg3);
vcpu             1479 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg4(vcpu, regs->sprg4);
vcpu             1480 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg5(vcpu, regs->sprg5);
vcpu             1481 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg6(vcpu, regs->sprg6);
vcpu             1482 arch/powerpc/kvm/booke.c 	kvmppc_set_sprg7(vcpu, regs->sprg7);
vcpu             1485 arch/powerpc/kvm/booke.c 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
vcpu             1487 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
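
The two handlers above back the KVM_GET_REGS/KVM_SET_REGS ioctls, copying between struct kvm_regs and the vcpu state (regs->pc mirrors vcpu->arch.regs.nip, and so on). A short userspace sketch, assuming vcpu_fd was obtained via KVM_CREATE_VCPU; error handling is minimal.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int bump_guest_pc(int vcpu_fd, unsigned long delta)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.pc += delta;			/* lands back in vcpu->arch.regs.nip */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}
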
vcpu             1491 arch/powerpc/kvm/booke.c static void get_sregs_base(struct kvm_vcpu *vcpu,
vcpu             1498 arch/powerpc/kvm/booke.c 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
vcpu             1499 arch/powerpc/kvm/booke.c 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
vcpu             1500 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsr = vcpu->arch.mcsr;
vcpu             1501 arch/powerpc/kvm/booke.c 	sregs->u.e.esr = kvmppc_get_esr(vcpu);
vcpu             1502 arch/powerpc/kvm/booke.c 	sregs->u.e.dear = kvmppc_get_dar(vcpu);
vcpu             1503 arch/powerpc/kvm/booke.c 	sregs->u.e.tsr = vcpu->arch.tsr;
vcpu             1504 arch/powerpc/kvm/booke.c 	sregs->u.e.tcr = vcpu->arch.tcr;
vcpu             1505 arch/powerpc/kvm/booke.c 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
vcpu             1507 arch/powerpc/kvm/booke.c 	sregs->u.e.vrsave = vcpu->arch.vrsave;
vcpu             1510 arch/powerpc/kvm/booke.c static int set_sregs_base(struct kvm_vcpu *vcpu,
vcpu             1516 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
vcpu             1517 arch/powerpc/kvm/booke.c 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
vcpu             1518 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsr = sregs->u.e.mcsr;
vcpu             1519 arch/powerpc/kvm/booke.c 	kvmppc_set_esr(vcpu, sregs->u.e.esr);
vcpu             1520 arch/powerpc/kvm/booke.c 	kvmppc_set_dar(vcpu, sregs->u.e.dear);
vcpu             1521 arch/powerpc/kvm/booke.c 	vcpu->arch.vrsave = sregs->u.e.vrsave;
vcpu             1522 arch/powerpc/kvm/booke.c 	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
vcpu             1525 arch/powerpc/kvm/booke.c 		vcpu->arch.dec = sregs->u.e.dec;
vcpu             1526 arch/powerpc/kvm/booke.c 		kvmppc_emulate_dec(vcpu);
vcpu             1530 arch/powerpc/kvm/booke.c 		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
vcpu             1535 arch/powerpc/kvm/booke.c static void get_sregs_arch206(struct kvm_vcpu *vcpu,
vcpu             1540 arch/powerpc/kvm/booke.c 	sregs->u.e.pir = vcpu->vcpu_id;
vcpu             1541 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
vcpu             1542 arch/powerpc/kvm/booke.c 	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
vcpu             1543 arch/powerpc/kvm/booke.c 	sregs->u.e.decar = vcpu->arch.decar;
vcpu             1544 arch/powerpc/kvm/booke.c 	sregs->u.e.ivpr = vcpu->arch.ivpr;
vcpu             1547 arch/powerpc/kvm/booke.c static int set_sregs_arch206(struct kvm_vcpu *vcpu,
vcpu             1553 arch/powerpc/kvm/booke.c 	if (sregs->u.e.pir != vcpu->vcpu_id)
vcpu             1556 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
vcpu             1557 arch/powerpc/kvm/booke.c 	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
vcpu             1558 arch/powerpc/kvm/booke.c 	vcpu->arch.decar = sregs->u.e.decar;
vcpu             1559 arch/powerpc/kvm/booke.c 	vcpu->arch.ivpr = sregs->u.e.ivpr;
vcpu             1564 arch/powerpc/kvm/booke.c int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu             1568 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
vcpu             1569 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
vcpu             1570 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
vcpu             1571 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
vcpu             1572 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
vcpu             1573 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
vcpu             1574 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
vcpu             1575 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
vcpu             1576 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
vcpu             1577 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
vcpu             1578 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
vcpu             1579 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
vcpu             1580 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
vcpu             1581 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
vcpu             1582 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
vcpu             1583 arch/powerpc/kvm/booke.c 	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
vcpu             1587 arch/powerpc/kvm/booke.c int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu             1592 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
vcpu             1593 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
vcpu             1594 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
vcpu             1595 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
vcpu             1596 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
vcpu             1597 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
vcpu             1598 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
vcpu             1599 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
vcpu             1600 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
vcpu             1601 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
vcpu             1602 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
vcpu             1603 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
vcpu             1604 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
vcpu             1605 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
vcpu             1606 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
vcpu             1607 arch/powerpc/kvm/booke.c 	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
vcpu             1612 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu             1617 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             1619 arch/powerpc/kvm/booke.c 	sregs->pvr = vcpu->arch.pvr;
vcpu             1621 arch/powerpc/kvm/booke.c 	get_sregs_base(vcpu, sregs);
vcpu             1622 arch/powerpc/kvm/booke.c 	get_sregs_arch206(vcpu, sregs);
vcpu             1623 arch/powerpc/kvm/booke.c 	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
vcpu             1625 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
vcpu             1629 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu             1634 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             1635 arch/powerpc/kvm/booke.c 	if (vcpu->arch.pvr != sregs->pvr)
vcpu             1638 arch/powerpc/kvm/booke.c 	ret = set_sregs_base(vcpu, sregs);
vcpu             1642 arch/powerpc/kvm/booke.c 	ret = set_sregs_arch206(vcpu, sregs);
vcpu             1646 arch/powerpc/kvm/booke.c 	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
vcpu             1649 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
vcpu             1653 arch/powerpc/kvm/booke.c int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
vcpu             1660 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
vcpu             1663 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
vcpu             1667 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
vcpu             1670 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
vcpu             1674 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
vcpu             1677 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
vcpu             1680 arch/powerpc/kvm/booke.c 		u32 epr = kvmppc_get_epr(vcpu);
vcpu             1686 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.epcr);
vcpu             1690 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.tcr);
vcpu             1693 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.tsr);
vcpu             1699 arch/powerpc/kvm/booke.c 		*val = get_reg_val(id, vcpu->arch.vrsave);
vcpu             1702 arch/powerpc/kvm/booke.c 		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
vcpu             1709 arch/powerpc/kvm/booke.c int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
vcpu             1716 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
vcpu             1719 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
vcpu             1723 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
vcpu             1726 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
vcpu             1730 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
vcpu             1733 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
vcpu             1737 arch/powerpc/kvm/booke.c 		kvmppc_set_epr(vcpu, new_epr);
vcpu             1743 arch/powerpc/kvm/booke.c 		kvmppc_set_epcr(vcpu, new_epcr);
vcpu             1749 arch/powerpc/kvm/booke.c 		kvmppc_set_tsr_bits(vcpu, tsr_bits);
vcpu             1754 arch/powerpc/kvm/booke.c 		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
vcpu             1759 arch/powerpc/kvm/booke.c 		kvmppc_set_tsr(vcpu, tsr);
vcpu             1764 arch/powerpc/kvm/booke.c 		kvmppc_set_tcr(vcpu, tcr);
vcpu             1768 arch/powerpc/kvm/booke.c 		vcpu->arch.vrsave = set_reg_val(id, *val);
vcpu             1771 arch/powerpc/kvm/booke.c 		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
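
The get/set_one_reg pair above is reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A sketch of setting the guest timer control register from userspace; KVM_REG_PPC_TCR comes from the powerpc uapi headers (a 32-bit register in that encoding), and vcpu_fd setup is assumed.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_guest_tcr(int vcpu_fd, uint32_t tcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TCR,	/* routed to kvmppc_set_tcr() above */
		.addr = (uintptr_t)&tcr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
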
vcpu             1778 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             1783 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             1788 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu             1793 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             1794 arch/powerpc/kvm/booke.c 	r = kvmppc_core_vcpu_translate(vcpu, tr);
vcpu             1795 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
vcpu             1834 arch/powerpc/kvm/booke.c void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
vcpu             1837 arch/powerpc/kvm/booke.c 	vcpu->arch.epcr = new_epcr;
vcpu             1839 arch/powerpc/kvm/booke.c 	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
vcpu             1840 arch/powerpc/kvm/booke.c 	if (vcpu->arch.epcr  & SPRN_EPCR_ICM)
vcpu             1841 arch/powerpc/kvm/booke.c 		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
vcpu             1846 arch/powerpc/kvm/booke.c void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
vcpu             1848 arch/powerpc/kvm/booke.c 	vcpu->arch.tcr = new_tcr;
vcpu             1849 arch/powerpc/kvm/booke.c 	arm_next_watchdog(vcpu);
vcpu             1850 arch/powerpc/kvm/booke.c 	update_timer_ints(vcpu);
vcpu             1853 arch/powerpc/kvm/booke.c void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
vcpu             1855 arch/powerpc/kvm/booke.c 	set_bits(tsr_bits, &vcpu->arch.tsr);
vcpu             1857 arch/powerpc/kvm/booke.c 	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
vcpu             1858 arch/powerpc/kvm/booke.c 	kvm_vcpu_kick(vcpu);
vcpu             1861 arch/powerpc/kvm/booke.c void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
vcpu             1863 arch/powerpc/kvm/booke.c 	clear_bits(tsr_bits, &vcpu->arch.tsr);
vcpu             1870 arch/powerpc/kvm/booke.c 		arm_next_watchdog(vcpu);
vcpu             1872 arch/powerpc/kvm/booke.c 	update_timer_ints(vcpu);
vcpu             1875 arch/powerpc/kvm/booke.c void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
vcpu             1877 arch/powerpc/kvm/booke.c 	if (vcpu->arch.tcr & TCR_ARE) {
vcpu             1878 arch/powerpc/kvm/booke.c 		vcpu->arch.dec = vcpu->arch.decar;
vcpu             1879 arch/powerpc/kvm/booke.c 		kvmppc_emulate_dec(vcpu);
vcpu             1882 arch/powerpc/kvm/booke.c 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
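
kvmppc_decrementer_func() above shows the auto-reload behaviour gated by TCR[ARE]: on expiry the decrementer is reloaded from DECAR before the DIS status bit is latched, so a periodic guest timer keeps ticking without guest intervention. A toy sketch of that policy; the field names are placeholders.

#include <stdbool.h>
#include <stdint.h>

struct toy_timer {
	bool     auto_reload;	/* stands in for TCR[ARE]    */
	uint32_t dec;		/* decrementer count         */
	uint32_t decar;		/* auto-reload value (DECAR) */
	bool     dis_pending;	/* stands in for TSR[DIS]    */
};

static void toy_decrementer_expired(struct toy_timer *t)
{
	if (t->auto_reload)
		t->dec = t->decar;	/* reload first, as the code above does */
	t->dis_pending = true;		/* then latch the decrementer status   */
}
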
vcpu             1940 arch/powerpc/kvm/booke.c void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
vcpu             1947 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
vcpu             1949 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp |= MSRP_DEP;
vcpu             1951 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp |= MSRP_PMMP;
vcpu             1954 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
vcpu             1956 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
vcpu             1958 arch/powerpc/kvm/booke.c 			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
vcpu             1963 arch/powerpc/kvm/booke.c int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
vcpu             1970 arch/powerpc/kvm/booke.c 	if (!(vcpu->arch.shared->msr & MSR_PR) &&
vcpu             1971 arch/powerpc/kvm/booke.c 	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
vcpu             1973 arch/powerpc/kvm/booke.c 		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
vcpu             1987 arch/powerpc/kvm/booke.c 		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
vcpu             1990 arch/powerpc/kvm/booke.c 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
vcpu             2000 arch/powerpc/kvm/booke.c 	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
vcpu             2014 arch/powerpc/kvm/booke.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu             2021 arch/powerpc/kvm/booke.c 	vcpu_load(vcpu);
vcpu             2024 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.dbcr0 = 0;
vcpu             2025 arch/powerpc/kvm/booke.c 		vcpu->guest_debug = 0;
vcpu             2026 arch/powerpc/kvm/booke.c 		kvm_guest_protect_msr(vcpu, MSR_DE, false);
vcpu             2030 arch/powerpc/kvm/booke.c 	kvm_guest_protect_msr(vcpu, MSR_DE, true);
vcpu             2031 arch/powerpc/kvm/booke.c 	vcpu->guest_debug = dbg->control;
vcpu             2032 arch/powerpc/kvm/booke.c 	vcpu->arch.dbg_reg.dbcr0 = 0;
vcpu             2034 arch/powerpc/kvm/booke.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu             2035 arch/powerpc/kvm/booke.c 		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
vcpu             2038 arch/powerpc/kvm/booke.c 	dbg_reg = &(vcpu->arch.dbg_reg);
vcpu             2058 arch/powerpc/kvm/booke.c 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu             2088 arch/powerpc/kvm/booke.c 	vcpu_put(vcpu);
vcpu             2092 arch/powerpc/kvm/booke.c void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2094 arch/powerpc/kvm/booke.c 	vcpu->cpu = smp_processor_id();
vcpu             2095 arch/powerpc/kvm/booke.c 	current->thread.kvm_vcpu = vcpu;
vcpu             2098 arch/powerpc/kvm/booke.c void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             2101 arch/powerpc/kvm/booke.c 	vcpu->cpu = -1;
vcpu             2107 arch/powerpc/kvm/booke.c void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
vcpu             2109 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
vcpu             2122 arch/powerpc/kvm/booke.c void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
vcpu             2124 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
vcpu             2132 arch/powerpc/kvm/booke.c void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2134 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
vcpu             2137 arch/powerpc/kvm/booke.c void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             2139 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
vcpu               65 arch/powerpc/kvm/booke.h void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
vcpu               66 arch/powerpc/kvm/booke.h void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
vcpu               68 arch/powerpc/kvm/booke.h void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
vcpu               69 arch/powerpc/kvm/booke.h void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
vcpu               70 arch/powerpc/kvm/booke.h void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
vcpu               71 arch/powerpc/kvm/booke.h void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
vcpu               73 arch/powerpc/kvm/booke.h int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               75 arch/powerpc/kvm/booke.h int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
vcpu               76 arch/powerpc/kvm/booke.h int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
vcpu               79 arch/powerpc/kvm/booke.h void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
vcpu               80 arch/powerpc/kvm/booke.h void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
vcpu               83 arch/powerpc/kvm/booke.h void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
vcpu               85 arch/powerpc/kvm/booke.h void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
vcpu               86 arch/powerpc/kvm/booke.h void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
vcpu               95 arch/powerpc/kvm/booke.h void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
vcpu               97 arch/powerpc/kvm/booke.h extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
vcpu               99 arch/powerpc/kvm/booke.h 				       struct kvm_vcpu *vcpu,
vcpu              101 arch/powerpc/kvm/booke.h extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
vcpu              103 arch/powerpc/kvm/booke.h extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
vcpu              105 arch/powerpc/kvm/booke.h extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
vcpu              107 arch/powerpc/kvm/booke.h 				       struct kvm_vcpu *vcpu,
vcpu              109 arch/powerpc/kvm/booke.h extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
vcpu              111 arch/powerpc/kvm/booke.h extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
vcpu               24 arch/powerpc/kvm/booke_emulate.c static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
vcpu               26 arch/powerpc/kvm/booke_emulate.c 	vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
vcpu               27 arch/powerpc/kvm/booke_emulate.c 	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
vcpu               30 arch/powerpc/kvm/booke_emulate.c static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
vcpu               32 arch/powerpc/kvm/booke_emulate.c 	vcpu->arch.regs.nip = vcpu->arch.dsrr0;
vcpu               33 arch/powerpc/kvm/booke_emulate.c 	kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
vcpu               36 arch/powerpc/kvm/booke_emulate.c static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
vcpu               38 arch/powerpc/kvm/booke_emulate.c 	vcpu->arch.regs.nip = vcpu->arch.csrr0;
vcpu               39 arch/powerpc/kvm/booke_emulate.c 	kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
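
The three helpers above implement the rfi/rfci/rfdi return-from-interrupt flavours: each restores the guest NIP and MSR from a different save/restore pair (SRR, CSRR for critical, DSRR for debug). A toy model with a hypothetical structure, purely to show the symmetry.

#include <stdint.h>

struct toy_guest {
	uint32_t nip, msr;
	uint32_t srr0, srr1;	/* ordinary interrupts */
	uint32_t csrr0, csrr1;	/* critical interrupts */
	uint32_t dsrr0, dsrr1;	/* debug interrupts    */
};

static void toy_rfi(struct toy_guest *g)  { g->nip = g->srr0;  g->msr = g->srr1;  }
static void toy_rfci(struct toy_guest *g) { g->nip = g->csrr0; g->msr = g->csrr1; }
static void toy_rfdi(struct toy_guest *g) { g->nip = g->dsrr0; g->msr = g->dsrr1; }
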
vcpu               42 arch/powerpc/kvm/booke_emulate.c int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               53 arch/powerpc/kvm/booke_emulate.c 			kvmppc_emul_rfi(vcpu);
vcpu               54 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
vcpu               59 arch/powerpc/kvm/booke_emulate.c 			kvmppc_emul_rfci(vcpu);
vcpu               60 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
vcpu               65 arch/powerpc/kvm/booke_emulate.c 			kvmppc_emul_rfdi(vcpu);
vcpu               66 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS);
vcpu               80 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
vcpu               81 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
vcpu               85 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
vcpu               86 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
vcpu               90 arch/powerpc/kvm/booke_emulate.c 			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
vcpu               91 arch/powerpc/kvm/booke_emulate.c 					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
vcpu               92 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
vcpu               96 arch/powerpc/kvm/booke_emulate.c 			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
vcpu               98 arch/powerpc/kvm/booke_emulate.c 			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
vcpu              120 arch/powerpc/kvm/booke_emulate.c int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
vcpu              127 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.shared->dar = spr_val;
vcpu              130 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.shared->esr = spr_val;
vcpu              133 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.csrr0 = spr_val;
vcpu              136 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.csrr1 = spr_val;
vcpu              139 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dsrr0 = spr_val;
vcpu              142 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dsrr1 = spr_val;
vcpu              149 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              153 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.iac1 = spr_val;
vcpu              160 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              164 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.iac2 = spr_val;
vcpu              172 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              176 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.iac3 = spr_val;
vcpu              183 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              187 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.iac4 = spr_val;
vcpu              195 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              199 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.dac1 = spr_val;
vcpu              206 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              210 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.dac2 = spr_val;
vcpu              217 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              225 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.dbcr0 = spr_val;
vcpu              232 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              236 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.dbcr1 = spr_val;
vcpu              243 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              247 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbg_reg.dbcr2 = spr_val;
vcpu              254 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              257 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.dbsr &= ~spr_val;
vcpu              258 arch/powerpc/kvm/booke_emulate.c 		if (!(vcpu->arch.dbsr & ~DBSR_IDE))
vcpu              259 arch/powerpc/kvm/booke_emulate.c 			kvmppc_core_dequeue_debug(vcpu);
vcpu              262 arch/powerpc/kvm/booke_emulate.c 		kvmppc_clr_tsr_bits(vcpu, spr_val);
vcpu              269 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->arch.tcr & TCR_WRC_MASK) {
vcpu              271 arch/powerpc/kvm/booke_emulate.c 			spr_val |= vcpu->arch.tcr & TCR_WRC_MASK;
vcpu              273 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_tcr(vcpu, spr_val);
vcpu              277 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.decar = spr_val;
vcpu              285 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_sprg4(vcpu, spr_val);
vcpu              288 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_sprg5(vcpu, spr_val);
vcpu              291 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_sprg6(vcpu, spr_val);
vcpu              294 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_sprg7(vcpu, spr_val);
vcpu              298 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivpr = spr_val;
vcpu              304 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
vcpu              307 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
vcpu              310 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
vcpu              316 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
vcpu              319 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
vcpu              322 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
vcpu              325 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
vcpu              328 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
vcpu              331 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
vcpu              337 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
vcpu              340 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
vcpu              343 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
vcpu              346 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
vcpu              349 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
vcpu              352 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
vcpu              355 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
vcpu              358 arch/powerpc/kvm/booke_emulate.c 		vcpu->arch.mcsr &= ~spr_val;
vcpu              362 arch/powerpc/kvm/booke_emulate.c 		kvmppc_set_epcr(vcpu, spr_val);
vcpu              364 arch/powerpc/kvm/booke_emulate.c 		mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
vcpu              373 arch/powerpc/kvm/booke_emulate.c 		current->thread.debug = vcpu->arch.dbg_reg;
vcpu              374 arch/powerpc/kvm/booke_emulate.c 		switch_booke_debug_regs(&vcpu->arch.dbg_reg);
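
Several mtspr cases above (DBSR, TSR via kvmppc_clr_tsr_bits, MCSR) follow the write-one-to-clear convention: the value the guest writes is a mask of status bits to acknowledge, not a new register value. A tiny self-contained illustration with an invented bit layout.

#include <assert.h>
#include <stdint.h>

static uint32_t w1c_write(uint32_t status, uint32_t written)
{
	return status & ~written;	/* only the bits written as 1 are cleared */
}

int main(void)
{
	uint32_t dbsr = 0x30000000;		/* hypothetical: two status bits pending */

	dbsr = w1c_write(dbsr, 0x10000000);	/* guest acknowledges one of them        */
	assert(dbsr == 0x20000000);
	return 0;
}
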
vcpu              379 arch/powerpc/kvm/booke_emulate.c int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
vcpu              385 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivpr;
vcpu              388 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.shared->dar;
vcpu              391 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.shared->esr;
vcpu              394 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.epr;
vcpu              397 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.csrr0;
vcpu              400 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.csrr1;
vcpu              403 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dsrr0;
vcpu              406 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dsrr1;
vcpu              409 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.iac1;
vcpu              412 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.iac2;
vcpu              416 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.iac3;
vcpu              419 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.iac4;
vcpu              423 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.dac1;
vcpu              426 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.dac2;
vcpu              429 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.dbcr0;
vcpu              430 arch/powerpc/kvm/booke_emulate.c 		if (vcpu->guest_debug)
vcpu              434 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.dbcr1;
vcpu              437 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbg_reg.dbcr2;
vcpu              440 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.dbsr;
vcpu              443 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.tsr;
vcpu              446 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.tcr;
vcpu              450 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
vcpu              453 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
vcpu              456 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
vcpu              459 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
vcpu              462 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
vcpu              465 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
vcpu              468 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
vcpu              471 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
vcpu              474 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
vcpu              477 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
vcpu              480 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
vcpu              483 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
vcpu              486 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
vcpu              489 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
vcpu              492 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
vcpu              495 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
vcpu              498 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.mcsr;
vcpu              502 arch/powerpc/kvm/booke_emulate.c 		*spr_val = vcpu->arch.epcr;
vcpu              139 arch/powerpc/kvm/e500.c 	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
vcpu              140 arch/powerpc/kvm/e500.c 			get_cur_as(&vcpu_e500->vcpu),
vcpu              141 arch/powerpc/kvm/e500.c 			get_cur_pid(&vcpu_e500->vcpu),
vcpu              142 arch/powerpc/kvm/e500.c 			get_cur_pr(&vcpu_e500->vcpu), 1);
vcpu              143 arch/powerpc/kvm/e500.c 	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
vcpu              144 arch/powerpc/kvm/e500.c 			get_cur_as(&vcpu_e500->vcpu), 0,
vcpu              145 arch/powerpc/kvm/e500.c 			get_cur_pr(&vcpu_e500->vcpu), 1);
vcpu              214 arch/powerpc/kvm/e500.c unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
vcpu              217 arch/powerpc/kvm/e500.c 	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
vcpu              218 arch/powerpc/kvm/e500.c 				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
vcpu              221 arch/powerpc/kvm/e500.c void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
vcpu              223 arch/powerpc/kvm/e500.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              225 arch/powerpc/kvm/e500.c 	if (vcpu->arch.pid != pid) {
vcpu              226 arch/powerpc/kvm/e500.c 		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
vcpu              293 arch/powerpc/kvm/e500.c void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
vcpu              296 arch/powerpc/kvm/e500.c 	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
vcpu              299 arch/powerpc/kvm/e500.c static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
vcpu              301 arch/powerpc/kvm/e500.c 	kvmppc_booke_vcpu_load(vcpu, cpu);
vcpu              304 arch/powerpc/kvm/e500.c 	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
vcpu              307 arch/powerpc/kvm/e500.c static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
vcpu              310 arch/powerpc/kvm/e500.c 	if (vcpu->arch.shadow_msr & MSR_SPE)
vcpu              311 arch/powerpc/kvm/e500.c 		kvmppc_vcpu_disable_spe(vcpu);
vcpu              314 arch/powerpc/kvm/e500.c 	kvmppc_booke_vcpu_put(vcpu);
vcpu              346 arch/powerpc/kvm/e500.c int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu              348 arch/powerpc/kvm/e500.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              353 arch/powerpc/kvm/e500.c 	vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu              356 arch/powerpc/kvm/e500.c 	vcpu->arch.cpu_type = KVM_CPU_E500V2;
vcpu              361 arch/powerpc/kvm/e500.c static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
vcpu              364 arch/powerpc/kvm/e500.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              375 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
vcpu              376 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
vcpu              377 arch/powerpc/kvm/e500.c 	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
vcpu              379 arch/powerpc/kvm/e500.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
vcpu              381 arch/powerpc/kvm/e500.c 	kvmppc_get_sregs_ivor(vcpu, sregs);
vcpu              382 arch/powerpc/kvm/e500.c 	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
vcpu              386 arch/powerpc/kvm/e500.c static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
vcpu              389 arch/powerpc/kvm/e500.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              398 arch/powerpc/kvm/e500.c 	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
vcpu              406 arch/powerpc/kvm/e500.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
vcpu              408 arch/powerpc/kvm/e500.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
vcpu              410 arch/powerpc/kvm/e500.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
vcpu              415 arch/powerpc/kvm/e500.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
vcpu              419 arch/powerpc/kvm/e500.c 	return kvmppc_set_sregs_ivor(vcpu, sregs);
vcpu              422 arch/powerpc/kvm/e500.c static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
vcpu              425 arch/powerpc/kvm/e500.c 	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
vcpu              429 arch/powerpc/kvm/e500.c static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
vcpu              432 arch/powerpc/kvm/e500.c 	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
vcpu              440 arch/powerpc/kvm/e500.c 	struct kvm_vcpu *vcpu;
vcpu              443 arch/powerpc/kvm/e500.c 	BUILD_BUG_ON_MSG(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0,
vcpu              452 arch/powerpc/kvm/e500.c 	vcpu = &vcpu_e500->vcpu;
vcpu              453 arch/powerpc/kvm/e500.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu              466 arch/powerpc/kvm/e500.c 	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
vcpu              467 arch/powerpc/kvm/e500.c 	if (!vcpu->arch.shared) {
vcpu              472 arch/powerpc/kvm/e500.c 	return vcpu;
vcpu              479 arch/powerpc/kvm/e500.c 	kvm_vcpu_uninit(vcpu);
vcpu              486 arch/powerpc/kvm/e500.c static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
vcpu              488 arch/powerpc/kvm/e500.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              490 arch/powerpc/kvm/e500.c 	free_page((unsigned long)vcpu->arch.shared);
vcpu              493 arch/powerpc/kvm/e500.c 	kvm_vcpu_uninit(vcpu);
vcpu               58 arch/powerpc/kvm/e500.h 	struct kvm_vcpu vcpu;
vcpu              100 arch/powerpc/kvm/e500.h static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
vcpu              102 arch/powerpc/kvm/e500.h 	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
vcpu              127 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
vcpu              128 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
vcpu              129 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
vcpu              130 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
vcpu              131 arch/powerpc/kvm/e500.h int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
vcpu              135 arch/powerpc/kvm/e500.h void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              136 arch/powerpc/kvm/e500.h int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
vcpu              138 arch/powerpc/kvm/e500.h int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
vcpu              140 arch/powerpc/kvm/e500.h int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
vcpu              208 arch/powerpc/kvm/e500.h static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
vcpu              210 arch/powerpc/kvm/e500.h 	return vcpu->arch.pid & 0xff;
vcpu              213 arch/powerpc/kvm/e500.h static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
vcpu              215 arch/powerpc/kvm/e500.h 	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
vcpu              218 arch/powerpc/kvm/e500.h static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
vcpu              220 arch/powerpc/kvm/e500.h 	return !!(vcpu->arch.shared->msr & MSR_PR);
vcpu              223 arch/powerpc/kvm/e500.h static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
vcpu              225 arch/powerpc/kvm/e500.h 	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
vcpu              228 arch/powerpc/kvm/e500.h static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
vcpu              230 arch/powerpc/kvm/e500.h 	return vcpu->arch.shared->mas6 & 0x1;
vcpu              233 arch/powerpc/kvm/e500.h static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
vcpu              239 arch/powerpc/kvm/e500.h 	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
vcpu              242 arch/powerpc/kvm/e500.h static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
vcpu              244 arch/powerpc/kvm/e500.h 	return vcpu->arch.shared->mas0 & 0xfff;
vcpu              247 arch/powerpc/kvm/e500.h static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
vcpu              249 arch/powerpc/kvm/e500.h 	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
vcpu              252 arch/powerpc/kvm/e500.h static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
vcpu              263 arch/powerpc/kvm/e500.h 	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
vcpu              268 arch/powerpc/kvm/e500.h 	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
vcpu              287 arch/powerpc/kvm/e500.h #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
vcpu              288 arch/powerpc/kvm/e500.h #define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
vcpu              305 arch/powerpc/kvm/e500.h static inline int get_lpid(struct kvm_vcpu *vcpu)
vcpu              307 arch/powerpc/kvm/e500.h 	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
vcpu              310 arch/powerpc/kvm/e500.h unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
vcpu              313 arch/powerpc/kvm/e500.h static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
vcpu              315 arch/powerpc/kvm/e500.h 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              316 arch/powerpc/kvm/e500.h 	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
vcpu              325 arch/powerpc/kvm/e500.h static inline bool has_feature(const struct kvm_vcpu *vcpu,
vcpu              331 arch/powerpc/kvm/e500.h 		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
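The e500.h entries above show the usual embedding pattern: struct kvm_vcpu is the first member of kvmppc_vcpu_e500, and to_e500() recovers the outer struct with container_of(). A minimal standalone sketch of that pattern, assuming simplified stand-in struct and field names rather than the kernel definitions:

	/*
	 * Illustration only: generic vcpu embedded as the first member of a
	 * vendor-specific struct, recovered via container_of().  Names are
	 * hypothetical stand-ins, not the kernel types.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_vcpu { int id; };

	struct e500_like_vcpu {
		struct base_vcpu vcpu;    /* must remain the first member */
		unsigned int shadow_pid;
	};

	static struct e500_like_vcpu *to_outer(struct base_vcpu *v)
	{
		return container_of(v, struct e500_like_vcpu, vcpu);
	}

	int main(void)
	{
		struct e500_like_vcpu outer = { .vcpu = { .id = 3 }, .shadow_pid = 7 };
		struct base_vcpu *inner = &outer.vcpu;

		printf("recovered shadow_pid = %u\n", to_outer(inner)->shadow_pid);
		return 0;
	}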
vcpu               51 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
vcpu               53 arch/powerpc/kvm/e500_emulate.c 	ulong param = vcpu->arch.regs.gpr[rb];
vcpu               59 arch/powerpc/kvm/e500_emulate.c 	clear_bit(prio, &vcpu->arch.pending_exceptions);
vcpu               63 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
vcpu               65 arch/powerpc/kvm/e500_emulate.c 	ulong param = vcpu->arch.regs.gpr[rb];
vcpu               74 arch/powerpc/kvm/e500_emulate.c 	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
vcpu               86 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu               94 arch/powerpc/kvm/e500_emulate.c 		run->debug.arch.address = vcpu->arch.regs.nip;
vcpu               96 arch/powerpc/kvm/e500_emulate.c 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
vcpu              106 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
vcpu              108 arch/powerpc/kvm/e500_emulate.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              115 arch/powerpc/kvm/e500_emulate.c static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
vcpu              120 arch/powerpc/kvm/e500_emulate.c 		kvmppc_set_gpr(vcpu, rt,
vcpu              128 arch/powerpc/kvm/e500_emulate.c int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu              142 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_dcbtls(vcpu);
vcpu              147 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
vcpu              151 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
vcpu              156 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_tlbre(vcpu);
vcpu              160 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_tlbwe(vcpu);
vcpu              164 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
vcpu              165 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
vcpu              170 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
vcpu              171 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
vcpu              176 arch/powerpc/kvm/e500_emulate.c 			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
vcpu              177 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
vcpu              181 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
vcpu              185 arch/powerpc/kvm/e500_emulate.c 			emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
vcpu              200 arch/powerpc/kvm/e500_emulate.c 		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
vcpu              205 arch/powerpc/kvm/e500_emulate.c int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
vcpu              207 arch/powerpc/kvm/e500_emulate.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              213 arch/powerpc/kvm/e500_emulate.c 		kvmppc_set_pid(vcpu, spr_val);
vcpu              226 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas0 = spr_val;
vcpu              229 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas1 = spr_val;
vcpu              232 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas2 = spr_val;
vcpu              235 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
vcpu              236 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas7_3 |= spr_val;
vcpu              239 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas4 = spr_val;
vcpu              242 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas6 = spr_val;
vcpu              245 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
vcpu              246 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
vcpu              274 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.pwrmgtcr0 = spr_val;
vcpu              287 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
vcpu              290 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
vcpu              293 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
vcpu              298 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
vcpu              301 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
vcpu              305 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
vcpu              309 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
vcpu              312 arch/powerpc/kvm/e500_emulate.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
vcpu              316 arch/powerpc/kvm/e500_emulate.c 		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
vcpu              322 arch/powerpc/kvm/e500_emulate.c int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
vcpu              324 arch/powerpc/kvm/e500_emulate.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              339 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas0;
vcpu              342 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas1;
vcpu              345 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas2;
vcpu              348 arch/powerpc/kvm/e500_emulate.c 		*spr_val = (u32)vcpu->arch.shared->mas7_3;
vcpu              351 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas4;
vcpu              354 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas6;
vcpu              357 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
vcpu              361 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.decar;
vcpu              364 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.tlbcfg[0];
vcpu              367 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.tlbcfg[1];
vcpu              370 arch/powerpc/kvm/e500_emulate.c 		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
vcpu              372 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.tlbps[0];
vcpu              375 arch/powerpc/kvm/e500_emulate.c 		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
vcpu              377 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.tlbps[1];
vcpu              400 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.mmucfg;
vcpu              403 arch/powerpc/kvm/e500_emulate.c 		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
vcpu              409 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.eptcfg;
vcpu              413 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.pwrmgtcr0;
vcpu              419 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
vcpu              422 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
vcpu              425 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
vcpu              430 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
vcpu              433 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
vcpu              437 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
vcpu              441 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
vcpu              444 arch/powerpc/kvm/e500_emulate.c 		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
vcpu              448 arch/powerpc/kvm/e500_emulate.c 		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
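The e500_emulate.c SPR entries above keep MAS3 and MAS7 as one 64-bit shared field (mas7_3): a guest mtspr to MAS3 rewrites the low 32 bits and a write to MAS7 rewrites the high 32 bits. A hedged sketch of that split update, with illustrative helper names that are not kernel API:

	/*
	 * Sketch of the MAS7_3 split-register update visible above.
	 * mas7_3, set_mas3() and set_mas7() are illustrative names only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint64_t mas7_3;

	static void set_mas3(uint32_t spr_val)
	{
		mas7_3 &= ~(uint64_t)0xffffffff;   /* clear low half (MAS3)  */
		mas7_3 |= spr_val;
	}

	static void set_mas7(uint32_t spr_val)
	{
		mas7_3 &= (uint64_t)0xffffffff;    /* clear high half (MAS7) */
		mas7_3 |= (uint64_t)spr_val << 32;
	}

	int main(void)
	{
		set_mas3(0x11223344);
		set_mas7(0xaabbccdd);
		printf("MAS7_3 = 0x%016llx\n", (unsigned long long)mas7_3);
		return 0;
	}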
vcpu               64 arch/powerpc/kvm/e500_mmu.c static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
vcpu               66 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu               67 arch/powerpc/kvm/e500_mmu.c 	int esel = get_tlb_esel_bit(vcpu);
vcpu               71 arch/powerpc/kvm/e500_mmu.c 		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
vcpu              126 arch/powerpc/kvm/e500_mmu.c static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
vcpu              129 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              134 arch/powerpc/kvm/e500_mmu.c 	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
vcpu              136 arch/powerpc/kvm/e500_mmu.c 	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
vcpu              138 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
vcpu              140 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
vcpu              141 arch/powerpc/kvm/e500_mmu.c 		| MAS1_TID(get_tlbmiss_tid(vcpu))
vcpu              143 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
vcpu              144 arch/powerpc/kvm/e500_mmu.c 		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
vcpu              145 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
vcpu              146 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
vcpu              147 arch/powerpc/kvm/e500_mmu.c 		| (get_cur_pid(vcpu) << 16)
vcpu              193 arch/powerpc/kvm/e500_mmu.c static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
vcpu              197 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              240 arch/powerpc/kvm/e500_mmu.c 	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu              245 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
vcpu              247 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              264 arch/powerpc/kvm/e500_mmu.c 				get_cur_pid(vcpu), -1);
vcpu              270 arch/powerpc/kvm/e500_mmu.c 	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu              307 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
vcpu              309 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              310 arch/powerpc/kvm/e500_mmu.c 	int pid = get_cur_spid(vcpu);
vcpu              322 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
vcpu              324 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              328 arch/powerpc/kvm/e500_mmu.c 	tlbsel = get_tlb_tlbsel(vcpu);
vcpu              329 arch/powerpc/kvm/e500_mmu.c 	esel = get_tlb_esel(vcpu, tlbsel);
vcpu              332 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
vcpu              333 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu              334 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas1 = gtlbe->mas1;
vcpu              335 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas2 = gtlbe->mas2;
vcpu              336 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
vcpu              341 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
vcpu              343 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              344 arch/powerpc/kvm/e500_mmu.c 	int as = !!get_cur_sas(vcpu);
vcpu              345 arch/powerpc/kvm/e500_mmu.c 	unsigned int pid = get_cur_spid(vcpu);
vcpu              360 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
vcpu              362 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas1 = gtlbe->mas1;
vcpu              363 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 = gtlbe->mas2;
vcpu              364 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
vcpu              369 arch/powerpc/kvm/e500_mmu.c 		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
vcpu              372 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
vcpu              375 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas1 =
vcpu              376 arch/powerpc/kvm/e500_mmu.c 			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
vcpu              377 arch/powerpc/kvm/e500_mmu.c 			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
vcpu              378 arch/powerpc/kvm/e500_mmu.c 			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
vcpu              379 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 &= MAS2_EPN;
vcpu              380 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
vcpu              382 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
vcpu              386 arch/powerpc/kvm/e500_mmu.c 	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
vcpu              390 arch/powerpc/kvm/e500_mmu.c int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
vcpu              392 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              398 arch/powerpc/kvm/e500_mmu.c 	tlbsel = get_tlb_tlbsel(vcpu);
vcpu              399 arch/powerpc/kvm/e500_mmu.c 	esel = get_tlb_esel(vcpu, tlbsel);
vcpu              410 arch/powerpc/kvm/e500_mmu.c 	gtlbe->mas1 = vcpu->arch.shared->mas1;
vcpu              411 arch/powerpc/kvm/e500_mmu.c 	gtlbe->mas2 = vcpu->arch.shared->mas2;
vcpu              412 arch/powerpc/kvm/e500_mmu.c 	if (!(vcpu->arch.shared->msr & MSR_CM))
vcpu              414 arch/powerpc/kvm/e500_mmu.c 	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
vcpu              416 arch/powerpc/kvm/e500_mmu.c 	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
vcpu              428 arch/powerpc/kvm/e500_mmu.c 			kvmppc_set_tlb1map_range(vcpu, gtlbe);
vcpu              431 arch/powerpc/kvm/e500_mmu.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu              434 arch/powerpc/kvm/e500_mmu.c 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
vcpu              444 arch/powerpc/kvm/e500_mmu.c 		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
vcpu              447 arch/powerpc/kvm/e500_mmu.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu              449 arch/powerpc/kvm/e500_mmu.c 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
vcpu              453 arch/powerpc/kvm/e500_mmu.c static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
vcpu              456 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              469 arch/powerpc/kvm/e500_mmu.c int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
vcpu              481 arch/powerpc/kvm/e500_mmu.c 	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
vcpu              487 arch/powerpc/kvm/e500_mmu.c 	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
vcpu              495 arch/powerpc/kvm/e500_mmu.c int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
vcpu              497 arch/powerpc/kvm/e500_mmu.c 	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
vcpu              499 arch/powerpc/kvm/e500_mmu.c 	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
vcpu              502 arch/powerpc/kvm/e500_mmu.c int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
vcpu              504 arch/powerpc/kvm/e500_mmu.c 	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
vcpu              506 arch/powerpc/kvm/e500_mmu.c 	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
vcpu              509 arch/powerpc/kvm/e500_mmu.c void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
vcpu              511 arch/powerpc/kvm/e500_mmu.c 	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
vcpu              513 arch/powerpc/kvm/e500_mmu.c 	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
vcpu              516 arch/powerpc/kvm/e500_mmu.c void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
vcpu              518 arch/powerpc/kvm/e500_mmu.c 	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
vcpu              520 arch/powerpc/kvm/e500_mmu.c 	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
vcpu              523 arch/powerpc/kvm/e500_mmu.c gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
vcpu              526 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              536 arch/powerpc/kvm/e500_mmu.c void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
vcpu              546 arch/powerpc/kvm/e500_mmu.c 	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
vcpu              571 arch/powerpc/kvm/e500_mmu.c void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu              573 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
vcpu              574 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
vcpu              575 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
vcpu              576 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
vcpu              577 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
vcpu              578 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mas6 = vcpu->arch.shared->mas6;
vcpu              580 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
vcpu              581 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
vcpu              582 arch/powerpc/kvm/e500_mmu.c 	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
vcpu              587 arch/powerpc/kvm/e500_mmu.c int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu              590 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
vcpu              591 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
vcpu              592 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
vcpu              593 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
vcpu              594 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
vcpu              595 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
vcpu              601 arch/powerpc/kvm/e500_mmu.c int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
vcpu              609 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas0);
vcpu              612 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas1);
vcpu              615 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas2);
vcpu              618 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas7_3);
vcpu              621 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas4);
vcpu              624 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.shared->mas6);
vcpu              627 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.mmucfg);
vcpu              630 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.eptcfg);
vcpu              637 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
vcpu              644 arch/powerpc/kvm/e500_mmu.c 		*val = get_reg_val(id, vcpu->arch.tlbps[i]);
vcpu              654 arch/powerpc/kvm/e500_mmu.c int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
vcpu              662 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas0 = set_reg_val(id, *val);
vcpu              665 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas1 = set_reg_val(id, *val);
vcpu              668 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas2 = set_reg_val(id, *val);
vcpu              671 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
vcpu              674 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas4 = set_reg_val(id, *val);
vcpu              677 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.shared->mas6 = set_reg_val(id, *val);
vcpu              682 arch/powerpc/kvm/e500_mmu.c 		if (reg != vcpu->arch.mmucfg)
vcpu              688 arch/powerpc/kvm/e500_mmu.c 		if (reg != vcpu->arch.eptcfg)
vcpu              699 arch/powerpc/kvm/e500_mmu.c 		if (reg != vcpu->arch.tlbcfg[i])
vcpu              709 arch/powerpc/kvm/e500_mmu.c 		if (reg != vcpu->arch.tlbps[i])
vcpu              721 arch/powerpc/kvm/e500_mmu.c static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
vcpu              724 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
vcpu              726 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
vcpu              727 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
vcpu              729 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
vcpu              730 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
vcpu              731 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
vcpu              735 arch/powerpc/kvm/e500_mmu.c int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu              738 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              835 arch/powerpc/kvm/e500_mmu.c 	vcpu_mmu_geometry_update(vcpu, &params);
vcpu              860 arch/powerpc/kvm/e500_mmu.c int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
vcpu              863 arch/powerpc/kvm/e500_mmu.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              865 arch/powerpc/kvm/e500_mmu.c 	kvmppc_core_flush_tlb(vcpu);
vcpu              870 arch/powerpc/kvm/e500_mmu.c static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
vcpu              874 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
vcpu              877 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
vcpu              879 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[0] |= params[0].entries;
vcpu              880 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;
vcpu              882 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
vcpu              884 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] |= params[1].entries;
vcpu              885 arch/powerpc/kvm/e500_mmu.c 	vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;
vcpu              887 arch/powerpc/kvm/e500_mmu.c 	if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
vcpu              888 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
vcpu              889 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);
vcpu              891 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.mmucfg &= ~MMUCFG_LRAT;
vcpu              894 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.eptcfg = 0;
vcpu              895 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
vcpu              896 arch/powerpc/kvm/e500_mmu.c 		vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
vcpu              904 arch/powerpc/kvm/e500_mmu.c 	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
vcpu              947 arch/powerpc/kvm/e500_mmu.c 	vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);
vcpu              131 arch/powerpc/kvm/e500_mmu_host.c 		__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
vcpu              136 arch/powerpc/kvm/e500_mmu_host.c 				  vcpu_e500->vcpu.kvm->arch.lpid);
vcpu              149 arch/powerpc/kvm/e500_mmu_host.c 	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
vcpu              158 arch/powerpc/kvm/e500_mmu_host.c void kvmppc_map_magic(struct kvm_vcpu *vcpu)
vcpu              160 arch/powerpc/kvm/e500_mmu_host.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              162 arch/powerpc/kvm/e500_mmu_host.c 	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
vcpu              174 arch/powerpc/kvm/e500_mmu_host.c 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
vcpu              295 arch/powerpc/kvm/e500_mmu_host.c void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
vcpu              297 arch/powerpc/kvm/e500_mmu_host.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              305 arch/powerpc/kvm/e500_mmu_host.c 	struct kvm_vcpu *vcpu,
vcpu              311 arch/powerpc/kvm/e500_mmu_host.c 	u32 pr = vcpu->arch.shared->msr & MSR_PR;
vcpu              334 arch/powerpc/kvm/e500_mmu_host.c 	struct kvm *kvm = vcpu_e500->vcpu.kvm;
vcpu              353 arch/powerpc/kvm/e500_mmu_host.c 	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
vcpu              469 arch/powerpc/kvm/e500_mmu_host.c 	pgdir = vcpu_e500->vcpu.arch.pgdir;
vcpu              495 arch/powerpc/kvm/e500_mmu_host.c 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
vcpu              586 arch/powerpc/kvm/e500_mmu_host.c void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
vcpu              589 arch/powerpc/kvm/e500_mmu_host.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              605 arch/powerpc/kvm/e500_mmu_host.c 			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
vcpu              625 arch/powerpc/kvm/e500_mmu_host.c int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
vcpu              640 arch/powerpc/kvm/e500_mmu_host.c 	geaddr = kvmppc_get_pc(vcpu);
vcpu              642 arch/powerpc/kvm/e500_mmu_host.c 	addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
vcpu              645 arch/powerpc/kvm/e500_mmu_host.c 	mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
vcpu              646 arch/powerpc/kvm/e500_mmu_host.c 	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
vcpu              672 arch/powerpc/kvm/e500_mmu_host.c 	pr = vcpu->arch.shared->msr & MSR_PR;
vcpu              685 arch/powerpc/kvm/e500_mmu_host.c 	if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
vcpu              715 arch/powerpc/kvm/e500_mmu_host.c int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
vcpu               27 arch/powerpc/kvm/e500mc.c void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
vcpu               48 arch/powerpc/kvm/e500mc.c 	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
vcpu               73 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
vcpu               94 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
vcpu              100 arch/powerpc/kvm/e500mc.c void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
vcpu              102 arch/powerpc/kvm/e500mc.c 	vcpu->arch.pid = pid;
vcpu              105 arch/powerpc/kvm/e500mc.c void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
vcpu              112 arch/powerpc/kvm/e500mc.c static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
vcpu              114 arch/powerpc/kvm/e500mc.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              116 arch/powerpc/kvm/e500mc.c 	kvmppc_booke_vcpu_load(vcpu, cpu);
vcpu              118 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_LPID, get_lpid(vcpu));
vcpu              119 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
vcpu              120 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GPIR, vcpu->vcpu_id);
vcpu              121 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
vcpu              122 arch/powerpc/kvm/e500mc.c 	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
vcpu              123 arch/powerpc/kvm/e500mc.c 	vcpu->arch.epsc = vcpu->arch.eplc;
vcpu              124 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_EPLC, vcpu->arch.eplc);
vcpu              125 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_EPSC, vcpu->arch.epsc);
vcpu              127 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
vcpu              128 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
vcpu              129 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
vcpu              130 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
vcpu              131 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
vcpu              132 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
vcpu              133 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
vcpu              135 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
vcpu              136 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
vcpu              138 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GEPR, vcpu->arch.epr);
vcpu              139 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
vcpu              140 arch/powerpc/kvm/e500mc.c 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
vcpu              142 arch/powerpc/kvm/e500mc.c 	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
vcpu              143 arch/powerpc/kvm/e500mc.c 	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
vcpu              145 arch/powerpc/kvm/e500mc.c 		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
vcpu              149 arch/powerpc/kvm/e500mc.c static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
vcpu              151 arch/powerpc/kvm/e500mc.c 	vcpu->arch.eplc = mfspr(SPRN_EPLC);
vcpu              152 arch/powerpc/kvm/e500mc.c 	vcpu->arch.epsc = mfspr(SPRN_EPSC);
vcpu              154 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
vcpu              155 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
vcpu              156 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
vcpu              157 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
vcpu              159 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
vcpu              160 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
vcpu              162 arch/powerpc/kvm/e500mc.c 	vcpu->arch.epr = mfspr(SPRN_GEPR);
vcpu              163 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
vcpu              164 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared->esr = mfspr(SPRN_GESR);
vcpu              166 arch/powerpc/kvm/e500mc.c 	vcpu->arch.oldpir = mfspr(SPRN_PIR);
vcpu              168 arch/powerpc/kvm/e500mc.c 	kvmppc_booke_vcpu_put(vcpu);
vcpu              195 arch/powerpc/kvm/e500mc.c int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu              197 arch/powerpc/kvm/e500mc.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              199 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
vcpu              202 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
vcpu              204 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
vcpu              206 arch/powerpc/kvm/e500mc.c 	vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu              209 arch/powerpc/kvm/e500mc.c 	vcpu->arch.cpu_type = KVM_CPU_E500MC;
vcpu              214 arch/powerpc/kvm/e500mc.c static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
vcpu              217 arch/powerpc/kvm/e500mc.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              228 arch/powerpc/kvm/e500mc.c 	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
vcpu              231 arch/powerpc/kvm/e500mc.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
vcpu              232 arch/powerpc/kvm/e500mc.c 	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
vcpu              233 arch/powerpc/kvm/e500mc.c 	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
vcpu              235 arch/powerpc/kvm/e500mc.c 	return kvmppc_get_sregs_ivor(vcpu, sregs);
vcpu              238 arch/powerpc/kvm/e500mc.c static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
vcpu              241 arch/powerpc/kvm/e500mc.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              250 arch/powerpc/kvm/e500mc.c 	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
vcpu              258 arch/powerpc/kvm/e500mc.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
vcpu              263 arch/powerpc/kvm/e500mc.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
vcpu              265 arch/powerpc/kvm/e500mc.c 		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
vcpu              269 arch/powerpc/kvm/e500mc.c 	return kvmppc_set_sregs_ivor(vcpu, sregs);
vcpu              272 arch/powerpc/kvm/e500mc.c static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
vcpu              279 arch/powerpc/kvm/e500mc.c 		*val = get_reg_val(id, vcpu->arch.sprg9);
vcpu              282 arch/powerpc/kvm/e500mc.c 		r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
vcpu              288 arch/powerpc/kvm/e500mc.c static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
vcpu              295 arch/powerpc/kvm/e500mc.c 		vcpu->arch.sprg9 = set_reg_val(id, *val);
vcpu              298 arch/powerpc/kvm/e500mc.c 		r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
vcpu              308 arch/powerpc/kvm/e500mc.c 	struct kvm_vcpu *vcpu;
vcpu              316 arch/powerpc/kvm/e500mc.c 	vcpu = &vcpu_e500->vcpu;
vcpu              319 arch/powerpc/kvm/e500mc.c 	vcpu->arch.oldpir = 0xffffffff;
vcpu              321 arch/powerpc/kvm/e500mc.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu              329 arch/powerpc/kvm/e500mc.c 	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
vcpu              330 arch/powerpc/kvm/e500mc.c 	if (!vcpu->arch.shared) {
vcpu              335 arch/powerpc/kvm/e500mc.c 	return vcpu;
vcpu              340 arch/powerpc/kvm/e500mc.c 	kvm_vcpu_uninit(vcpu);
vcpu              348 arch/powerpc/kvm/e500mc.c static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
vcpu              350 arch/powerpc/kvm/e500mc.c 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
vcpu              352 arch/powerpc/kvm/e500mc.c 	free_page((unsigned long)vcpu->arch.shared);
vcpu              354 arch/powerpc/kvm/e500mc.c 	kvm_vcpu_uninit(vcpu);
vcpu               26 arch/powerpc/kvm/emulate.c void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
vcpu               31 arch/powerpc/kvm/emulate.c 	pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
vcpu               32 arch/powerpc/kvm/emulate.c 	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
vcpu               36 arch/powerpc/kvm/emulate.c 	kvmppc_core_dequeue_dec(vcpu);
vcpu               41 arch/powerpc/kvm/emulate.c 	if (vcpu->arch.dec == 0)
vcpu               51 arch/powerpc/kvm/emulate.c 	dec_time = vcpu->arch.dec;
vcpu               58 arch/powerpc/kvm/emulate.c 	hrtimer_start(&vcpu->arch.dec_timer,
vcpu               60 arch/powerpc/kvm/emulate.c 	vcpu->arch.dec_jiffies = get_tb();
vcpu               63 arch/powerpc/kvm/emulate.c u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
vcpu               65 arch/powerpc/kvm/emulate.c 	u64 jd = tb - vcpu->arch.dec_jiffies;
vcpu               68 arch/powerpc/kvm/emulate.c 	if (vcpu->arch.dec < jd)
vcpu               72 arch/powerpc/kvm/emulate.c 	return vcpu->arch.dec - jd;
vcpu               75 arch/powerpc/kvm/emulate.c static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
vcpu               78 arch/powerpc/kvm/emulate.c 	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
vcpu               82 arch/powerpc/kvm/emulate.c 		kvmppc_set_srr0(vcpu, spr_val);
vcpu               85 arch/powerpc/kvm/emulate.c 		kvmppc_set_srr1(vcpu, spr_val);
vcpu               94 arch/powerpc/kvm/emulate.c 		vcpu->arch.dec = (u32) spr_val;
vcpu               95 arch/powerpc/kvm/emulate.c 		kvmppc_emulate_dec(vcpu);
vcpu               99 arch/powerpc/kvm/emulate.c 		kvmppc_set_sprg0(vcpu, spr_val);
vcpu              102 arch/powerpc/kvm/emulate.c 		kvmppc_set_sprg1(vcpu, spr_val);
vcpu              105 arch/powerpc/kvm/emulate.c 		kvmppc_set_sprg2(vcpu, spr_val);
vcpu              108 arch/powerpc/kvm/emulate.c 		kvmppc_set_sprg3(vcpu, spr_val);
vcpu              115 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
vcpu              123 arch/powerpc/kvm/emulate.c 	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
vcpu              128 arch/powerpc/kvm/emulate.c static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
vcpu              135 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_srr0(vcpu);
vcpu              138 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_srr1(vcpu);
vcpu              141 arch/powerpc/kvm/emulate.c 		spr_val = vcpu->arch.pvr;
vcpu              144 arch/powerpc/kvm/emulate.c 		spr_val = vcpu->vcpu_id;
vcpu              158 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_sprg0(vcpu);
vcpu              161 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_sprg1(vcpu);
vcpu              164 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_sprg2(vcpu);
vcpu              167 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_sprg3(vcpu);
vcpu              173 arch/powerpc/kvm/emulate.c 		spr_val = kvmppc_get_dec(vcpu, get_tb());
vcpu              176 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
vcpu              186 arch/powerpc/kvm/emulate.c 		kvmppc_set_gpr(vcpu, rt, spr_val);
vcpu              187 arch/powerpc/kvm/emulate.c 	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
vcpu              194 arch/powerpc/kvm/emulate.c int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu              202 arch/powerpc/kvm/emulate.c 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
vcpu              204 arch/powerpc/kvm/emulate.c 	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
vcpu              218 arch/powerpc/kvm/emulate.c 		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
vcpu              220 arch/powerpc/kvm/emulate.c 		kvmppc_core_queue_program(vcpu,
vcpu              221 arch/powerpc/kvm/emulate.c 					  vcpu->arch.shared->esr | ESR_PTR);
vcpu              234 arch/powerpc/kvm/emulate.c 			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
vcpu              236 arch/powerpc/kvm/emulate.c 			kvmppc_core_queue_program(vcpu,
vcpu              237 arch/powerpc/kvm/emulate.c 					vcpu->arch.shared->esr | ESR_PTR);
vcpu              243 arch/powerpc/kvm/emulate.c 			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
vcpu              251 arch/powerpc/kvm/emulate.c 			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
vcpu              275 arch/powerpc/kvm/emulate.c 			run->debug.arch.address = kvmppc_get_pc(vcpu);
vcpu              288 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
vcpu              299 arch/powerpc/kvm/emulate.c 	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
vcpu              303 arch/powerpc/kvm/emulate.c 		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
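The emulate.c entries for kvmppc_get_dec() above read the remaining decrementer as the programmed value minus the timebase ticks elapsed since it was written, clamped at zero. A small sketch of that calculation, assuming illustrative variable names rather than the kernel fields:

	/*
	 * Sketch of the decrementer read-back: remaining = dec - (tb - snapshot),
	 * or 0 if it has already expired.  dec_value/dec_jiffies are stand-ins.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t dec_value;    /* value the guest wrote to DEC    */
	static uint64_t dec_jiffies;  /* timebase snapshot at that write */

	static uint32_t get_dec(uint64_t tb_now)
	{
		uint64_t elapsed = tb_now - dec_jiffies;

		if (dec_value < elapsed)
			return 0;                     /* already expired */
		return dec_value - (uint32_t)elapsed;
	}

	int main(void)
	{
		dec_value = 1000;
		dec_jiffies = 50000;
		printf("remaining = %u\n", get_dec(50400));   /* prints 600 */
		return 0;
	}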
vcpu               28 arch/powerpc/kvm/emulate_loadstore.c static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
vcpu               30 arch/powerpc/kvm/emulate_loadstore.c 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
vcpu               31 arch/powerpc/kvm/emulate_loadstore.c 		kvmppc_core_queue_fpunavail(vcpu);
vcpu               40 arch/powerpc/kvm/emulate_loadstore.c static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
vcpu               42 arch/powerpc/kvm/emulate_loadstore.c 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
vcpu               43 arch/powerpc/kvm/emulate_loadstore.c 		kvmppc_core_queue_vsx_unavail(vcpu);
vcpu               52 arch/powerpc/kvm/emulate_loadstore.c static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
vcpu               54 arch/powerpc/kvm/emulate_loadstore.c 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
vcpu               55 arch/powerpc/kvm/emulate_loadstore.c 		kvmppc_core_queue_vec_unavail(vcpu);
vcpu               72 arch/powerpc/kvm/emulate_loadstore.c int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu               74 arch/powerpc/kvm/emulate_loadstore.c 	struct kvm_run *run = vcpu->run;
vcpu               81 arch/powerpc/kvm/emulate_loadstore.c 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
vcpu               83 arch/powerpc/kvm/emulate_loadstore.c 	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
vcpu               87 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_vsx_copy_nums = 0;
vcpu               88 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_vsx_offset = 0;
vcpu               89 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
vcpu               90 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_sp64_extend = 0;
vcpu               91 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_sign_extend = 0;
vcpu               92 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_vmx_copy_nums = 0;
vcpu               93 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_vmx_offset = 0;
vcpu               94 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.mmio_host_swabbed = 0;
vcpu               97 arch/powerpc/kvm/emulate_loadstore.c 	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
vcpu               98 arch/powerpc/kvm/emulate_loadstore.c 	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
vcpu              107 arch/powerpc/kvm/emulate_loadstore.c 				emulated = kvmppc_handle_loads(run, vcpu,
vcpu              110 arch/powerpc/kvm/emulate_loadstore.c 				emulated = kvmppc_handle_load(run, vcpu,
vcpu              114 arch/powerpc/kvm/emulate_loadstore.c 				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
vcpu              120 arch/powerpc/kvm/emulate_loadstore.c 			if (kvmppc_check_fp_disabled(vcpu))
vcpu              124 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_sp64_extend = 1;
vcpu              127 arch/powerpc/kvm/emulate_loadstore.c 				emulated = kvmppc_handle_loads(run, vcpu,
vcpu              130 arch/powerpc/kvm/emulate_loadstore.c 				emulated = kvmppc_handle_load(run, vcpu,
vcpu              134 arch/powerpc/kvm/emulate_loadstore.c 				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
vcpu              140 arch/powerpc/kvm/emulate_loadstore.c 			if (kvmppc_check_altivec_disabled(vcpu))
vcpu              144 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
vcpu              145 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
vcpu              148 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              151 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              154 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              157 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              162 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.mmio_vmx_offset =
vcpu              163 arch/powerpc/kvm/emulate_loadstore.c 				(vcpu->arch.vaddr_accessed & 0xf)/size;
vcpu              166 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vmx_copy_nums = 2;
vcpu              168 arch/powerpc/kvm/emulate_loadstore.c 						vcpu, KVM_MMIO_REG_VMX|op.reg,
vcpu              171 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vmx_copy_nums = 1;
vcpu              172 arch/powerpc/kvm/emulate_loadstore.c 				emulated = kvmppc_handle_vmx_load(run, vcpu,
vcpu              183 arch/powerpc/kvm/emulate_loadstore.c 				if (kvmppc_check_altivec_disabled(vcpu))
vcpu              186 arch/powerpc/kvm/emulate_loadstore.c 				if (kvmppc_check_vsx_disabled(vcpu))
vcpu              191 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_sp64_extend = 1;
vcpu              195 arch/powerpc/kvm/emulate_loadstore.c 					vcpu->arch.mmio_copy_type =
vcpu              198 arch/powerpc/kvm/emulate_loadstore.c 					vcpu->arch.mmio_copy_type =
vcpu              202 arch/powerpc/kvm/emulate_loadstore.c 					vcpu->arch.mmio_copy_type =
vcpu              205 arch/powerpc/kvm/emulate_loadstore.c 					vcpu->arch.mmio_copy_type =
vcpu              212 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vsx_copy_nums = 1;
vcpu              215 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vsx_copy_nums =
vcpu              220 arch/powerpc/kvm/emulate_loadstore.c 			emulated = kvmppc_handle_vsx_load(run, vcpu,
vcpu              230 arch/powerpc/kvm/emulate_loadstore.c 			emulated = kvmppc_handle_store(run, vcpu, op.val,
vcpu              234 arch/powerpc/kvm/emulate_loadstore.c 				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
vcpu              239 arch/powerpc/kvm/emulate_loadstore.c 			if (kvmppc_check_fp_disabled(vcpu))
vcpu              246 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu              247 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
vcpu              251 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_sp64_extend = 1;
vcpu              253 arch/powerpc/kvm/emulate_loadstore.c 			emulated = kvmppc_handle_store(run, vcpu,
vcpu              254 arch/powerpc/kvm/emulate_loadstore.c 					VCPU_FPR(vcpu, op.reg), size, 1);
vcpu              257 arch/powerpc/kvm/emulate_loadstore.c 				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
vcpu              263 arch/powerpc/kvm/emulate_loadstore.c 			if (kvmppc_check_altivec_disabled(vcpu))
vcpu              267 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
vcpu              268 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
vcpu              270 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu              271 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
vcpu              274 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              277 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              280 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              283 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              288 arch/powerpc/kvm/emulate_loadstore.c 			vcpu->arch.mmio_vmx_offset =
vcpu              289 arch/powerpc/kvm/emulate_loadstore.c 				(vcpu->arch.vaddr_accessed & 0xf)/size;
vcpu              292 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vmx_copy_nums = 2;
vcpu              294 arch/powerpc/kvm/emulate_loadstore.c 						vcpu, op.reg, 8, 1);
vcpu              296 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vmx_copy_nums = 1;
vcpu              298 arch/powerpc/kvm/emulate_loadstore.c 						vcpu, op.reg, size, 1);
vcpu              308 arch/powerpc/kvm/emulate_loadstore.c 				if (kvmppc_check_altivec_disabled(vcpu))
vcpu              311 arch/powerpc/kvm/emulate_loadstore.c 				if (kvmppc_check_vsx_disabled(vcpu))
vcpu              315 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu              316 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
vcpu              320 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_sp64_extend = 1;
vcpu              323 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              326 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_copy_type =
vcpu              333 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vsx_copy_nums = 1;
vcpu              336 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->arch.mmio_vsx_copy_nums =
vcpu              341 arch/powerpc/kvm/emulate_loadstore.c 			emulated = kvmppc_handle_vsx_store(run, vcpu,
vcpu              362 arch/powerpc/kvm/emulate_loadstore.c 		kvmppc_core_queue_program(vcpu, 0);
vcpu              365 arch/powerpc/kvm/emulate_loadstore.c 	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
vcpu              369 arch/powerpc/kvm/emulate_loadstore.c 		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
vcpu              117 arch/powerpc/kvm/mpic.c 	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
vcpu              118 arch/powerpc/kvm/mpic.c 	return vcpu ? vcpu->arch.irq_cpu_id : -1;
vcpu              179 arch/powerpc/kvm/mpic.c 	struct kvm_vcpu *vcpu;
vcpu              249 arch/powerpc/kvm/mpic.c 	if (!dst->vcpu) {
vcpu              255 arch/powerpc/kvm/mpic.c 	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
vcpu              261 arch/powerpc/kvm/mpic.c 	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
vcpu              267 arch/powerpc/kvm/mpic.c 	if (!dst->vcpu) {
vcpu              273 arch/powerpc/kvm/mpic.c 	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
vcpu              279 arch/powerpc/kvm/mpic.c 	kvmppc_core_dequeue_external(dst->vcpu);
vcpu             1177 arch/powerpc/kvm/mpic.c void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
vcpu             1179 arch/powerpc/kvm/mpic.c 	struct openpic *opp = vcpu->arch.mpic;
vcpu             1180 arch/powerpc/kvm/mpic.c 	int cpu = vcpu->arch.irq_cpu_id;
vcpu             1186 arch/powerpc/kvm/mpic.c 		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));
vcpu             1372 arch/powerpc/kvm/mpic.c static int kvm_mpic_read(struct kvm_vcpu *vcpu,
vcpu             1414 arch/powerpc/kvm/mpic.c static int kvm_mpic_write(struct kvm_vcpu *vcpu,
vcpu             1733 arch/powerpc/kvm/mpic.c int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
vcpu             1741 arch/powerpc/kvm/mpic.c 	if (opp->kvm != vcpu->kvm)
vcpu             1748 arch/powerpc/kvm/mpic.c 	if (opp->dst[cpu].vcpu) {
vcpu             1752 arch/powerpc/kvm/mpic.c 	if (vcpu->arch.irq_type) {
vcpu             1757 arch/powerpc/kvm/mpic.c 	opp->dst[cpu].vcpu = vcpu;
vcpu             1760 arch/powerpc/kvm/mpic.c 	vcpu->arch.mpic = opp;
vcpu             1761 arch/powerpc/kvm/mpic.c 	vcpu->arch.irq_cpu_id = cpu;
vcpu             1762 arch/powerpc/kvm/mpic.c 	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;
vcpu             1766 arch/powerpc/kvm/mpic.c 		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;
vcpu             1778 arch/powerpc/kvm/mpic.c void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
vcpu             1780 arch/powerpc/kvm/mpic.c 	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);
vcpu             1782 arch/powerpc/kvm/mpic.c 	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
vcpu               53 arch/powerpc/kvm/powerpc.c bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
vcpu               55 arch/powerpc/kvm/powerpc.c 	return kvm_arch_vcpu_runnable(vcpu);
vcpu               58 arch/powerpc/kvm/powerpc.c bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
vcpu               63 arch/powerpc/kvm/powerpc.c int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
vcpu               77 arch/powerpc/kvm/powerpc.c int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
vcpu               93 arch/powerpc/kvm/powerpc.c 			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
vcpu               94 arch/powerpc/kvm/powerpc.c 			vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu               99 arch/powerpc/kvm/powerpc.c 		vcpu->mode = IN_GUEST_MODE;
vcpu              112 arch/powerpc/kvm/powerpc.c 		if (kvm_request_pending(vcpu)) {
vcpu              115 arch/powerpc/kvm/powerpc.c 			trace_kvm_check_requests(vcpu);
vcpu              116 arch/powerpc/kvm/powerpc.c 			r = kvmppc_core_check_requests(vcpu);
vcpu              123 arch/powerpc/kvm/powerpc.c 		if (kvmppc_core_prepare_to_enter(vcpu)) {
vcpu              140 arch/powerpc/kvm/powerpc.c static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
vcpu              142 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
vcpu              160 arch/powerpc/kvm/powerpc.c int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
vcpu              162 arch/powerpc/kvm/powerpc.c 	int nr = kvmppc_get_gpr(vcpu, 11);
vcpu              164 arch/powerpc/kvm/powerpc.c 	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
vcpu              165 arch/powerpc/kvm/powerpc.c 	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
vcpu              166 arch/powerpc/kvm/powerpc.c 	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
vcpu              167 arch/powerpc/kvm/powerpc.c 	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
vcpu              170 arch/powerpc/kvm/powerpc.c 	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
vcpu              184 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.intr_msr & MSR_LE)
vcpu              186 arch/powerpc/kvm/powerpc.c 		if (shared_big_endian != vcpu->arch.shared_big_endian)
vcpu              187 arch/powerpc/kvm/powerpc.c 			kvmppc_swab_shared(vcpu);
vcpu              188 arch/powerpc/kvm/powerpc.c 		vcpu->arch.shared_big_endian = shared_big_endian;
vcpu              197 arch/powerpc/kvm/powerpc.c 			vcpu->arch.disable_kernel_nx = true;
vcpu              198 arch/powerpc/kvm/powerpc.c 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu              201 arch/powerpc/kvm/powerpc.c 		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
vcpu              202 arch/powerpc/kvm/powerpc.c 		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
vcpu              209 arch/powerpc/kvm/powerpc.c 		if ((vcpu->arch.magic_page_pa & 0xf000) !=
vcpu              210 arch/powerpc/kvm/powerpc.c 		    ((ulong)vcpu->arch.shared & 0xf000)) {
vcpu              211 arch/powerpc/kvm/powerpc.c 			void *old_shared = vcpu->arch.shared;
vcpu              212 arch/powerpc/kvm/powerpc.c 			ulong shared = (ulong)vcpu->arch.shared;
vcpu              216 arch/powerpc/kvm/powerpc.c 			shared |= vcpu->arch.magic_page_pa & 0xf000;
vcpu              219 arch/powerpc/kvm/powerpc.c 			vcpu->arch.shared = new_shared;
vcpu              238 arch/powerpc/kvm/powerpc.c 		kvm_vcpu_block(vcpu);
vcpu              239 arch/powerpc/kvm/powerpc.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu              246 arch/powerpc/kvm/powerpc.c 	kvmppc_set_gpr(vcpu, 4, r2);
vcpu              252 arch/powerpc/kvm/powerpc.c int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
vcpu              257 arch/powerpc/kvm/powerpc.c 	if (!vcpu->arch.pvr)
vcpu              261 arch/powerpc/kvm/powerpc.c 	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
vcpu              265 arch/powerpc/kvm/powerpc.c 	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
vcpu              276 arch/powerpc/kvm/powerpc.c 	vcpu->arch.sane = r;
vcpu              281 arch/powerpc/kvm/powerpc.c int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu              286 arch/powerpc/kvm/powerpc.c 	er = kvmppc_emulate_loadstore(vcpu);
vcpu              308 arch/powerpc/kvm/powerpc.c 		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
vcpu              323 arch/powerpc/kvm/powerpc.c int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu              326 arch/powerpc/kvm/powerpc.c 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
vcpu              330 arch/powerpc/kvm/powerpc.c 	vcpu->stat.st++;
vcpu              332 arch/powerpc/kvm/powerpc.c 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
vcpu              333 arch/powerpc/kvm/powerpc.c 		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
vcpu              339 arch/powerpc/kvm/powerpc.c 	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
vcpu              350 arch/powerpc/kvm/powerpc.c 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
vcpu              352 arch/powerpc/kvm/powerpc.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              353 arch/powerpc/kvm/powerpc.c 		void *magic = vcpu->arch.shared;
vcpu              359 arch/powerpc/kvm/powerpc.c 	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
vcpu              366 arch/powerpc/kvm/powerpc.c int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
vcpu              369 arch/powerpc/kvm/powerpc.c 	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
vcpu              373 arch/powerpc/kvm/powerpc.c 	vcpu->stat.ld++;
vcpu              375 arch/powerpc/kvm/powerpc.c 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
vcpu              376 arch/powerpc/kvm/powerpc.c 		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
vcpu              382 arch/powerpc/kvm/powerpc.c 	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
vcpu              396 arch/powerpc/kvm/powerpc.c 	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
vcpu              398 arch/powerpc/kvm/powerpc.c 	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
vcpu              399 arch/powerpc/kvm/powerpc.c 		void *magic = vcpu->arch.shared;
vcpu              405 arch/powerpc/kvm/powerpc.c 	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
vcpu              463 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu *vcpu;
vcpu              475 arch/powerpc/kvm/powerpc.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              476 arch/powerpc/kvm/powerpc.c 		kvm_arch_vcpu_free(vcpu);
vcpu              721 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu *vcpu;
vcpu              722 arch/powerpc/kvm/powerpc.c 	vcpu = kvmppc_core_vcpu_create(kvm, id);
vcpu              723 arch/powerpc/kvm/powerpc.c 	if (!IS_ERR(vcpu)) {
vcpu              724 arch/powerpc/kvm/powerpc.c 		vcpu->arch.wqp = &vcpu->wq;
vcpu              725 arch/powerpc/kvm/powerpc.c 		kvmppc_create_vcpu_debugfs(vcpu, id);
vcpu              727 arch/powerpc/kvm/powerpc.c 	return vcpu;
vcpu              730 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu              734 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
vcpu              737 arch/powerpc/kvm/powerpc.c 	hrtimer_cancel(&vcpu->arch.dec_timer);
vcpu              739 arch/powerpc/kvm/powerpc.c 	kvmppc_remove_vcpu_debugfs(vcpu);
vcpu              741 arch/powerpc/kvm/powerpc.c 	switch (vcpu->arch.irq_type) {
vcpu              743 arch/powerpc/kvm/powerpc.c 		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
vcpu              747 arch/powerpc/kvm/powerpc.c 			kvmppc_xive_cleanup_vcpu(vcpu);
vcpu              749 arch/powerpc/kvm/powerpc.c 			kvmppc_xics_free_icp(vcpu);
vcpu              752 arch/powerpc/kvm/powerpc.c 		kvmppc_xive_native_cleanup_vcpu(vcpu);
vcpu              756 arch/powerpc/kvm/powerpc.c 	kvmppc_core_vcpu_free(vcpu);
vcpu              759 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu              761 arch/powerpc/kvm/powerpc.c 	kvm_arch_vcpu_free(vcpu);
vcpu              764 arch/powerpc/kvm/powerpc.c int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu              766 arch/powerpc/kvm/powerpc.c 	return kvmppc_core_pending_dec(vcpu);
vcpu              771 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu *vcpu;
vcpu              773 arch/powerpc/kvm/powerpc.c 	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
vcpu              774 arch/powerpc/kvm/powerpc.c 	kvmppc_decrementer_func(vcpu);
vcpu              779 arch/powerpc/kvm/powerpc.c int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              783 arch/powerpc/kvm/powerpc.c 	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
vcpu              784 arch/powerpc/kvm/powerpc.c 	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
vcpu              785 arch/powerpc/kvm/powerpc.c 	vcpu->arch.dec_expires = get_tb();
vcpu              788 arch/powerpc/kvm/powerpc.c 	mutex_init(&vcpu->arch.exit_timing_lock);
vcpu              790 arch/powerpc/kvm/powerpc.c 	ret = kvmppc_subarch_vcpu_init(vcpu);
vcpu              794 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              796 arch/powerpc/kvm/powerpc.c 	kvmppc_mmu_destroy(vcpu);
vcpu              797 arch/powerpc/kvm/powerpc.c 	kvmppc_subarch_vcpu_uninit(vcpu);
vcpu              800 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu              810 arch/powerpc/kvm/powerpc.c 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
vcpu              812 arch/powerpc/kvm/powerpc.c 	kvmppc_core_vcpu_load(vcpu, cpu);
vcpu              815 arch/powerpc/kvm/powerpc.c void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu              817 arch/powerpc/kvm/powerpc.c 	kvmppc_core_vcpu_put(vcpu);
vcpu              819 arch/powerpc/kvm/powerpc.c 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
vcpu              891 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
vcpu              895 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
vcpu              896 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu              902 arch/powerpc/kvm/powerpc.c 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
vcpu              904 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
vcpu              906 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
vcpu              910 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
vcpu              914 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu              917 arch/powerpc/kvm/powerpc.c 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
vcpu              920 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
vcpu              922 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
vcpu              923 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
vcpu              927 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
vcpu              931 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu              938 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
vcpu              942 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
vcpu              943 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
vcpu              947 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
vcpu              951 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
vcpu              952 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu              959 arch/powerpc/kvm/powerpc.c 		val.vval = VCPU_VSX_VR(vcpu, index - 32);
vcpu              961 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
vcpu              965 arch/powerpc/kvm/powerpc.c 		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
vcpu              967 arch/powerpc/kvm/powerpc.c 		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
vcpu              973 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
vcpu              982 arch/powerpc/kvm/powerpc.c 	if (kvmppc_need_byteswap(vcpu))
vcpu              990 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
vcpu              993 arch/powerpc/kvm/powerpc.c 	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
vcpu              996 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
vcpu              999 arch/powerpc/kvm/powerpc.c 	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
vcpu             1002 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
vcpu             1005 arch/powerpc/kvm/powerpc.c 	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
vcpu             1008 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
vcpu             1011 arch/powerpc/kvm/powerpc.c 	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
vcpu             1015 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
vcpu             1019 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vmx_dword_offset(vcpu,
vcpu             1020 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_offset);
vcpu             1021 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu             1026 arch/powerpc/kvm/powerpc.c 	val.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1028 arch/powerpc/kvm/powerpc.c 	VCPU_VSX_VR(vcpu, index) = val.vval;
vcpu             1031 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
vcpu             1035 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vmx_word_offset(vcpu,
vcpu             1036 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_offset);
vcpu             1037 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu             1042 arch/powerpc/kvm/powerpc.c 	val.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1044 arch/powerpc/kvm/powerpc.c 	VCPU_VSX_VR(vcpu, index) = val.vval;
vcpu             1047 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
vcpu             1051 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vmx_hword_offset(vcpu,
vcpu             1052 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_offset);
vcpu             1053 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu             1058 arch/powerpc/kvm/powerpc.c 	val.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1060 arch/powerpc/kvm/powerpc.c 	VCPU_VSX_VR(vcpu, index) = val.vval;
vcpu             1063 arch/powerpc/kvm/powerpc.c static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
vcpu             1067 arch/powerpc/kvm/powerpc.c 	int offset = kvmppc_get_vmx_byte_offset(vcpu,
vcpu             1068 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_offset);
vcpu             1069 arch/powerpc/kvm/powerpc.c 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
vcpu             1074 arch/powerpc/kvm/powerpc.c 	val.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1076 arch/powerpc/kvm/powerpc.c 	VCPU_VSX_VR(vcpu, index) = val.vval;
vcpu             1110 arch/powerpc/kvm/powerpc.c static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu             1120 arch/powerpc/kvm/powerpc.c 	if (!vcpu->arch.mmio_host_swabbed) {
vcpu             1137 arch/powerpc/kvm/powerpc.c 	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
vcpu             1140 arch/powerpc/kvm/powerpc.c 	if (vcpu->arch.mmio_sign_extend) {
vcpu             1156 arch/powerpc/kvm/powerpc.c 	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
vcpu             1158 arch/powerpc/kvm/powerpc.c 		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
vcpu             1161 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu             1162 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
vcpu             1164 arch/powerpc/kvm/powerpc.c 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
vcpu             1168 arch/powerpc/kvm/powerpc.c 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
vcpu             1171 arch/powerpc/kvm/powerpc.c 		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
vcpu             1172 arch/powerpc/kvm/powerpc.c 		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
vcpu             1177 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu             1178 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
vcpu             1180 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
vcpu             1181 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vsr_dword(vcpu, gpr);
vcpu             1182 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
vcpu             1183 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vsr_word(vcpu, gpr);
vcpu             1184 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type ==
vcpu             1186 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vsr_dword_dump(vcpu, gpr);
vcpu             1187 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type ==
vcpu             1189 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vsr_word_dump(vcpu, gpr);
vcpu             1194 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu             1195 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
vcpu             1197 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
vcpu             1198 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vmx_dword(vcpu, gpr);
vcpu             1199 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
vcpu             1200 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vmx_word(vcpu, gpr);
vcpu             1201 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type ==
vcpu             1203 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vmx_hword(vcpu, gpr);
vcpu             1204 arch/powerpc/kvm/powerpc.c 		else if (vcpu->arch.mmio_copy_type ==
vcpu             1206 arch/powerpc/kvm/powerpc.c 			kvmppc_set_vmx_byte(vcpu, gpr);
vcpu             1211 arch/powerpc/kvm/powerpc.c 		if (kvmppc_need_byteswap(vcpu))
vcpu             1213 arch/powerpc/kvm/powerpc.c 		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
vcpu             1222 arch/powerpc/kvm/powerpc.c static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1230 arch/powerpc/kvm/powerpc.c 	if (kvmppc_need_byteswap(vcpu)) {
vcpu             1241 arch/powerpc/kvm/powerpc.c 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
vcpu             1245 arch/powerpc/kvm/powerpc.c 	vcpu->arch.io_gpr = rt;
vcpu             1246 arch/powerpc/kvm/powerpc.c 	vcpu->arch.mmio_host_swabbed = host_swabbed;
vcpu             1247 arch/powerpc/kvm/powerpc.c 	vcpu->mmio_needed = 1;
vcpu             1248 arch/powerpc/kvm/powerpc.c 	vcpu->mmio_is_write = 0;
vcpu             1249 arch/powerpc/kvm/powerpc.c 	vcpu->arch.mmio_sign_extend = sign_extend;
vcpu             1251 arch/powerpc/kvm/powerpc.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1253 arch/powerpc/kvm/powerpc.c 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
vcpu             1256 arch/powerpc/kvm/powerpc.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1259 arch/powerpc/kvm/powerpc.c 		kvmppc_complete_mmio_load(vcpu, run);
vcpu             1260 arch/powerpc/kvm/powerpc.c 		vcpu->mmio_needed = 0;
vcpu             1267 arch/powerpc/kvm/powerpc.c int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1271 arch/powerpc/kvm/powerpc.c 	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
vcpu             1276 arch/powerpc/kvm/powerpc.c int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1280 arch/powerpc/kvm/powerpc.c 	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
vcpu             1284 arch/powerpc/kvm/powerpc.c int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1291 arch/powerpc/kvm/powerpc.c 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
vcpu             1294 arch/powerpc/kvm/powerpc.c 	while (vcpu->arch.mmio_vsx_copy_nums) {
vcpu             1295 arch/powerpc/kvm/powerpc.c 		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
vcpu             1301 arch/powerpc/kvm/powerpc.c 		vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1303 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vsx_copy_nums--;
vcpu             1304 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vsx_offset++;
vcpu             1310 arch/powerpc/kvm/powerpc.c int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1318 arch/powerpc/kvm/powerpc.c 	if (kvmppc_need_byteswap(vcpu)) {
vcpu             1329 arch/powerpc/kvm/powerpc.c 	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
vcpu             1332 arch/powerpc/kvm/powerpc.c 	vcpu->mmio_needed = 1;
vcpu             1333 arch/powerpc/kvm/powerpc.c 	vcpu->mmio_is_write = 1;
vcpu             1335 arch/powerpc/kvm/powerpc.c 	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
vcpu             1355 arch/powerpc/kvm/powerpc.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1357 arch/powerpc/kvm/powerpc.c 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
vcpu             1360 arch/powerpc/kvm/powerpc.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             1363 arch/powerpc/kvm/powerpc.c 		vcpu->mmio_needed = 0;
vcpu             1372 arch/powerpc/kvm/powerpc.c static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
vcpu             1377 arch/powerpc/kvm/powerpc.c 	int copy_type = vcpu->arch.mmio_copy_type;
vcpu             1383 arch/powerpc/kvm/powerpc.c 			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
vcpu             1391 arch/powerpc/kvm/powerpc.c 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
vcpu             1393 arch/powerpc/kvm/powerpc.c 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
vcpu             1400 arch/powerpc/kvm/powerpc.c 			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
vcpu             1410 arch/powerpc/kvm/powerpc.c 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
vcpu             1413 arch/powerpc/kvm/powerpc.c 			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
vcpu             1426 arch/powerpc/kvm/powerpc.c int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1432 arch/powerpc/kvm/powerpc.c 	vcpu->arch.io_gpr = rs;
vcpu             1435 arch/powerpc/kvm/powerpc.c 	if (vcpu->arch.mmio_vsx_copy_nums > 4)
vcpu             1438 arch/powerpc/kvm/powerpc.c 	while (vcpu->arch.mmio_vsx_copy_nums) {
vcpu             1439 arch/powerpc/kvm/powerpc.c 		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
vcpu             1442 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_store(run, vcpu,
vcpu             1448 arch/powerpc/kvm/powerpc.c 		vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1450 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vsx_copy_nums--;
vcpu             1451 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vsx_offset++;
vcpu             1457 arch/powerpc/kvm/powerpc.c static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
vcpu             1463 arch/powerpc/kvm/powerpc.c 	vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1465 arch/powerpc/kvm/powerpc.c 	if (!vcpu->mmio_is_write) {
vcpu             1466 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
vcpu             1467 arch/powerpc/kvm/powerpc.c 			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
vcpu             1469 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_vsx_store(run, vcpu,
vcpu             1470 arch/powerpc/kvm/powerpc.c 			 vcpu->arch.io_gpr, run->mmio.len, 1);
vcpu             1493 arch/powerpc/kvm/powerpc.c int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1498 arch/powerpc/kvm/powerpc.c 	if (vcpu->arch.mmio_vsx_copy_nums > 2)
vcpu             1501 arch/powerpc/kvm/powerpc.c 	while (vcpu->arch.mmio_vmx_copy_nums) {
vcpu             1502 arch/powerpc/kvm/powerpc.c 		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
vcpu             1508 arch/powerpc/kvm/powerpc.c 		vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1509 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vmx_copy_nums--;
vcpu             1510 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vmx_offset++;
vcpu             1516 arch/powerpc/kvm/powerpc.c int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
vcpu             1523 arch/powerpc/kvm/powerpc.c 		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
vcpu             1528 arch/powerpc/kvm/powerpc.c 	reg.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1534 arch/powerpc/kvm/powerpc.c int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
vcpu             1541 arch/powerpc/kvm/powerpc.c 		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
vcpu             1546 arch/powerpc/kvm/powerpc.c 	reg.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1552 arch/powerpc/kvm/powerpc.c int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
vcpu             1559 arch/powerpc/kvm/powerpc.c 		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
vcpu             1564 arch/powerpc/kvm/powerpc.c 	reg.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1570 arch/powerpc/kvm/powerpc.c int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
vcpu             1577 arch/powerpc/kvm/powerpc.c 		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
vcpu             1582 arch/powerpc/kvm/powerpc.c 	reg.vval = VCPU_VSX_VR(vcpu, index);
vcpu             1588 arch/powerpc/kvm/powerpc.c int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu             1595 arch/powerpc/kvm/powerpc.c 	if (vcpu->arch.mmio_vsx_copy_nums > 2)
vcpu             1598 arch/powerpc/kvm/powerpc.c 	vcpu->arch.io_gpr = rs;
vcpu             1600 arch/powerpc/kvm/powerpc.c 	while (vcpu->arch.mmio_vmx_copy_nums) {
vcpu             1601 arch/powerpc/kvm/powerpc.c 		switch (vcpu->arch.mmio_copy_type) {
vcpu             1603 arch/powerpc/kvm/powerpc.c 			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
vcpu             1608 arch/powerpc/kvm/powerpc.c 			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
vcpu             1612 arch/powerpc/kvm/powerpc.c 			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
vcpu             1616 arch/powerpc/kvm/powerpc.c 			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
vcpu             1623 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
vcpu             1628 arch/powerpc/kvm/powerpc.c 		vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1629 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vmx_copy_nums--;
vcpu             1630 arch/powerpc/kvm/powerpc.c 		vcpu->arch.mmio_vmx_offset++;
vcpu             1636 arch/powerpc/kvm/powerpc.c static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
vcpu             1642 arch/powerpc/kvm/powerpc.c 	vcpu->arch.paddr_accessed += run->mmio.len;
vcpu             1644 arch/powerpc/kvm/powerpc.c 	if (!vcpu->mmio_is_write) {
vcpu             1645 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_vmx_load(run, vcpu,
vcpu             1646 arch/powerpc/kvm/powerpc.c 				vcpu->arch.io_gpr, run->mmio.len, 1);
vcpu             1648 arch/powerpc/kvm/powerpc.c 		emulated = kvmppc_handle_vmx_store(run, vcpu,
vcpu             1649 arch/powerpc/kvm/powerpc.c 				vcpu->arch.io_gpr, run->mmio.len, 1);
vcpu             1671 arch/powerpc/kvm/powerpc.c int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
vcpu             1681 arch/powerpc/kvm/powerpc.c 	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
vcpu             1691 arch/powerpc/kvm/powerpc.c 			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
vcpu             1698 arch/powerpc/kvm/powerpc.c 			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
vcpu             1701 arch/powerpc/kvm/powerpc.c 			val = get_reg_val(reg->id, vcpu->arch.vrsave);
vcpu             1719 arch/powerpc/kvm/powerpc.c int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
vcpu             1732 arch/powerpc/kvm/powerpc.c 	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
vcpu             1742 arch/powerpc/kvm/powerpc.c 			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
vcpu             1749 arch/powerpc/kvm/powerpc.c 			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
vcpu             1756 arch/powerpc/kvm/powerpc.c 			vcpu->arch.vrsave = set_reg_val(reg->id, val);
vcpu             1768 arch/powerpc/kvm/powerpc.c int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             1772 arch/powerpc/kvm/powerpc.c 	vcpu_load(vcpu);
vcpu             1774 arch/powerpc/kvm/powerpc.c 	if (vcpu->mmio_needed) {
vcpu             1775 arch/powerpc/kvm/powerpc.c 		vcpu->mmio_needed = 0;
vcpu             1776 arch/powerpc/kvm/powerpc.c 		if (!vcpu->mmio_is_write)
vcpu             1777 arch/powerpc/kvm/powerpc.c 			kvmppc_complete_mmio_load(vcpu, run);
vcpu             1779 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu             1780 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vsx_copy_nums--;
vcpu             1781 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vsx_offset++;
vcpu             1784 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu             1785 arch/powerpc/kvm/powerpc.c 			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
vcpu             1787 arch/powerpc/kvm/powerpc.c 				vcpu->mmio_needed = 1;
vcpu             1793 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
vcpu             1794 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_copy_nums--;
vcpu             1795 arch/powerpc/kvm/powerpc.c 			vcpu->arch.mmio_vmx_offset++;
vcpu             1798 arch/powerpc/kvm/powerpc.c 		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
vcpu             1799 arch/powerpc/kvm/powerpc.c 			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
vcpu             1801 arch/powerpc/kvm/powerpc.c 				vcpu->mmio_needed = 1;
vcpu             1806 arch/powerpc/kvm/powerpc.c 	} else if (vcpu->arch.osi_needed) {
vcpu             1811 arch/powerpc/kvm/powerpc.c 			kvmppc_set_gpr(vcpu, i, gprs[i]);
vcpu             1812 arch/powerpc/kvm/powerpc.c 		vcpu->arch.osi_needed = 0;
vcpu             1813 arch/powerpc/kvm/powerpc.c 	} else if (vcpu->arch.hcall_needed) {
vcpu             1816 arch/powerpc/kvm/powerpc.c 		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
vcpu             1818 arch/powerpc/kvm/powerpc.c 			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
vcpu             1819 arch/powerpc/kvm/powerpc.c 		vcpu->arch.hcall_needed = 0;
vcpu             1821 arch/powerpc/kvm/powerpc.c 	} else if (vcpu->arch.epr_needed) {
vcpu             1822 arch/powerpc/kvm/powerpc.c 		kvmppc_set_epr(vcpu, run->epr.epr);
vcpu             1823 arch/powerpc/kvm/powerpc.c 		vcpu->arch.epr_needed = 0;
vcpu             1827 arch/powerpc/kvm/powerpc.c 	kvm_sigset_activate(vcpu);
vcpu             1832 arch/powerpc/kvm/powerpc.c 		r = kvmppc_vcpu_run(run, vcpu);
vcpu             1834 arch/powerpc/kvm/powerpc.c 	kvm_sigset_deactivate(vcpu);
vcpu             1839 arch/powerpc/kvm/powerpc.c 	vcpu_put(vcpu);
vcpu             1843 arch/powerpc/kvm/powerpc.c int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
vcpu             1846 arch/powerpc/kvm/powerpc.c 		kvmppc_core_dequeue_external(vcpu);
vcpu             1850 arch/powerpc/kvm/powerpc.c 	kvmppc_core_queue_external(vcpu, irq);
vcpu             1852 arch/powerpc/kvm/powerpc.c 	kvm_vcpu_kick(vcpu);
vcpu             1857 arch/powerpc/kvm/powerpc.c static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
vcpu             1868 arch/powerpc/kvm/powerpc.c 		vcpu->arch.osi_enabled = true;
vcpu             1872 arch/powerpc/kvm/powerpc.c 		vcpu->arch.papr_enabled = true;
vcpu             1877 arch/powerpc/kvm/powerpc.c 			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
vcpu             1879 arch/powerpc/kvm/powerpc.c 			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
vcpu             1884 arch/powerpc/kvm/powerpc.c 		vcpu->arch.watchdog_enabled = true;
vcpu             1896 arch/powerpc/kvm/powerpc.c 		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
vcpu             1913 arch/powerpc/kvm/powerpc.c 			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
vcpu             1933 arch/powerpc/kvm/powerpc.c 				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
vcpu             1935 arch/powerpc/kvm/powerpc.c 				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
vcpu             1959 arch/powerpc/kvm/powerpc.c 			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
vcpu             1969 arch/powerpc/kvm/powerpc.c 		if (!is_kvmppc_hv_enabled(vcpu->kvm))
vcpu             1972 arch/powerpc/kvm/powerpc.c 		vcpu->kvm->arch.fwnmi_enabled = true;
vcpu             1981 arch/powerpc/kvm/powerpc.c 		r = kvmppc_sanity_check(vcpu);
vcpu             1999 arch/powerpc/kvm/powerpc.c int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu             2005 arch/powerpc/kvm/powerpc.c int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu             2014 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             2021 arch/powerpc/kvm/powerpc.c 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
vcpu             2029 arch/powerpc/kvm/powerpc.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             2038 arch/powerpc/kvm/powerpc.c 		vcpu_load(vcpu);
vcpu             2041 arch/powerpc/kvm/powerpc.c 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu             2042 arch/powerpc/kvm/powerpc.c 		vcpu_put(vcpu);
vcpu             2054 arch/powerpc/kvm/powerpc.c 			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
vcpu             2056 arch/powerpc/kvm/powerpc.c 			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
vcpu             2064 arch/powerpc/kvm/powerpc.c 		vcpu_load(vcpu);
vcpu             2067 arch/powerpc/kvm/powerpc.c 		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu             2068 arch/powerpc/kvm/powerpc.c 		vcpu_put(vcpu);
vcpu             2080 arch/powerpc/kvm/powerpc.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vcpu               22 arch/powerpc/kvm/timing.c void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
vcpu               27 arch/powerpc/kvm/timing.c 	mutex_lock(&vcpu->arch.exit_timing_lock);
vcpu               29 arch/powerpc/kvm/timing.c 	vcpu->arch.last_exit_type = 0xDEAD;
vcpu               31 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_count_type[i] = 0;
vcpu               32 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_max_duration[i] = 0;
vcpu               33 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
vcpu               34 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_sum_duration[i] = 0;
vcpu               35 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_sum_quad_duration[i] = 0;
vcpu               37 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_last_exit = 0;
vcpu               38 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_exit.tv64 = 0;
vcpu               39 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_last_enter.tv64 = 0;
vcpu               41 arch/powerpc/kvm/timing.c 	mutex_unlock(&vcpu->arch.exit_timing_lock);
vcpu               44 arch/powerpc/kvm/timing.c static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
vcpu               48 arch/powerpc/kvm/timing.c 	mutex_lock(&vcpu->arch.exit_timing_lock);
vcpu               50 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_count_type[type]++;
vcpu               53 arch/powerpc/kvm/timing.c 	old = vcpu->arch.timing_sum_duration[type];
vcpu               54 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_sum_duration[type] += duration;
vcpu               55 arch/powerpc/kvm/timing.c 	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
vcpu               58 arch/powerpc/kvm/timing.c 			__func__, old, vcpu->arch.timing_sum_duration[type],
vcpu               59 arch/powerpc/kvm/timing.c 			type, vcpu->arch.timing_count_type[type]);
vcpu               63 arch/powerpc/kvm/timing.c 	old = vcpu->arch.timing_sum_quad_duration[type];
vcpu               64 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
vcpu               65 arch/powerpc/kvm/timing.c 	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
vcpu               69 arch/powerpc/kvm/timing.c 			vcpu->arch.timing_sum_quad_duration[type],
vcpu               70 arch/powerpc/kvm/timing.c 			type, vcpu->arch.timing_count_type[type]);
vcpu               74 arch/powerpc/kvm/timing.c 	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
vcpu               75 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_min_duration[type] = duration;
vcpu               76 arch/powerpc/kvm/timing.c 	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
vcpu               77 arch/powerpc/kvm/timing.c 		vcpu->arch.timing_max_duration[type] = duration;
vcpu               79 arch/powerpc/kvm/timing.c 	mutex_unlock(&vcpu->arch.exit_timing_lock);
vcpu               82 arch/powerpc/kvm/timing.c void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
vcpu               84 arch/powerpc/kvm/timing.c 	u64 exit = vcpu->arch.timing_last_exit;
vcpu               85 arch/powerpc/kvm/timing.c 	u64 enter = vcpu->arch.timing_last_enter.tv64;
vcpu               88 arch/powerpc/kvm/timing.c 	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
vcpu               90 arch/powerpc/kvm/timing.c 	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
vcpu               94 arch/powerpc/kvm/timing.c 	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
vcpu               96 arch/powerpc/kvm/timing.c 	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
vcpu              131 arch/powerpc/kvm/timing.c 	struct kvm_vcpu *vcpu = m->private;
vcpu              139 arch/powerpc/kvm/timing.c 		min = vcpu->arch.timing_min_duration[i];
vcpu              141 arch/powerpc/kvm/timing.c 		max = vcpu->arch.timing_max_duration[i];
vcpu              143 arch/powerpc/kvm/timing.c 		sum = vcpu->arch.timing_sum_duration[i];
vcpu              145 arch/powerpc/kvm/timing.c 		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
vcpu              150 arch/powerpc/kvm/timing.c 			vcpu->arch.timing_count_type[i],
vcpu              179 arch/powerpc/kvm/timing.c 		struct kvm_vcpu *vcpu = seqf->private;
vcpu              184 arch/powerpc/kvm/timing.c 		kvmppc_init_timing_stats(vcpu);
vcpu              207 arch/powerpc/kvm/timing.c void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
vcpu              215 arch/powerpc/kvm/timing.c 					kvm_debugfs_dir, vcpu,
vcpu              224 arch/powerpc/kvm/timing.c 	vcpu->arch.debugfs_exit_timing = debugfs_file;
vcpu              227 arch/powerpc/kvm/timing.c void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
vcpu              229 arch/powerpc/kvm/timing.c 	if (vcpu->arch.debugfs_exit_timing) {
vcpu              230 arch/powerpc/kvm/timing.c 		debugfs_remove(vcpu->arch.debugfs_exit_timing);
vcpu              231 arch/powerpc/kvm/timing.c 		vcpu->arch.debugfs_exit_timing = NULL;
vcpu               16 arch/powerpc/kvm/timing.h void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
vcpu               17 arch/powerpc/kvm/timing.h void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
vcpu               18 arch/powerpc/kvm/timing.h void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
vcpu               19 arch/powerpc/kvm/timing.h void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
vcpu               21 arch/powerpc/kvm/timing.h static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
vcpu               23 arch/powerpc/kvm/timing.h 	vcpu->arch.last_exit_type = type;
vcpu               28 arch/powerpc/kvm/timing.h static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
vcpu               29 arch/powerpc/kvm/timing.h static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
vcpu               30 arch/powerpc/kvm/timing.h static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
vcpu               32 arch/powerpc/kvm/timing.h static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
vcpu               33 arch/powerpc/kvm/timing.h static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
vcpu               37 arch/powerpc/kvm/timing.h static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
vcpu               47 arch/powerpc/kvm/timing.h 		vcpu->stat.ext_intr_exits++;
vcpu               50 arch/powerpc/kvm/timing.h 		vcpu->stat.dec_exits++;
vcpu               53 arch/powerpc/kvm/timing.h 		vcpu->stat.emulated_inst_exits++;
vcpu               56 arch/powerpc/kvm/timing.h 		vcpu->stat.dsi_exits++;
vcpu               59 arch/powerpc/kvm/timing.h 		vcpu->stat.isi_exits++;
vcpu               62 arch/powerpc/kvm/timing.h 		vcpu->stat.syscall_exits++;
vcpu               65 arch/powerpc/kvm/timing.h 		vcpu->stat.dtlb_real_miss_exits++;
vcpu               68 arch/powerpc/kvm/timing.h 		vcpu->stat.dtlb_virt_miss_exits++;
vcpu               71 arch/powerpc/kvm/timing.h 		vcpu->stat.mmio_exits++;
vcpu               74 arch/powerpc/kvm/timing.h 		vcpu->stat.itlb_real_miss_exits++;
vcpu               77 arch/powerpc/kvm/timing.h 		vcpu->stat.itlb_virt_miss_exits++;
vcpu               80 arch/powerpc/kvm/timing.h 		vcpu->stat.signal_exits++;
vcpu               83 arch/powerpc/kvm/timing.h 		vcpu->stat.dbell_exits++;
vcpu               86 arch/powerpc/kvm/timing.h 		vcpu->stat.gdbell_exits++;
vcpu               92 arch/powerpc/kvm/timing.h static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type)
vcpu               94 arch/powerpc/kvm/timing.h 	kvmppc_set_exit_type(vcpu, type);
vcpu               95 arch/powerpc/kvm/timing.h 	kvmppc_account_exit_stat(vcpu, type);
vcpu              101 arch/powerpc/kvm/trace.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              102 arch/powerpc/kvm/trace.h 	TP_ARGS(vcpu),
vcpu              110 arch/powerpc/kvm/trace.h 		__entry->cpu_nr		= vcpu->vcpu_id;
vcpu              111 arch/powerpc/kvm/trace.h 		__entry->requests	= vcpu->requests;
vcpu               39 arch/powerpc/kvm/trace_booke.h 	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
vcpu               40 arch/powerpc/kvm/trace_booke.h 	TP_ARGS(exit_nr, vcpu),
vcpu               52 arch/powerpc/kvm/trace_booke.h 		__entry->pc		= kvmppc_get_pc(vcpu);
vcpu               53 arch/powerpc/kvm/trace_booke.h 		__entry->dar		= kvmppc_get_fault_dar(vcpu);
vcpu               54 arch/powerpc/kvm/trace_booke.h 		__entry->msr		= vcpu->arch.shared->msr;
vcpu               55 arch/powerpc/kvm/trace_booke.h 		__entry->last_inst	= vcpu->arch.last_inst;
vcpu              195 arch/powerpc/kvm/trace_booke.h 	TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
vcpu              196 arch/powerpc/kvm/trace_booke.h 	TP_ARGS(vcpu, priority),
vcpu              205 arch/powerpc/kvm/trace_booke.h 		__entry->cpu_nr		= vcpu->vcpu_id;
vcpu              207 arch/powerpc/kvm/trace_booke.h 		__entry->pending	= vcpu->arch.pending_exceptions;
vcpu              222 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              223 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu),
vcpu              233 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id	= vcpu->vcpu_id;
vcpu              234 arch/powerpc/kvm/trace_hv.h 		__entry->pc		= kvmppc_get_pc(vcpu);
vcpu              235 arch/powerpc/kvm/trace_hv.h 		__entry->ceded		= vcpu->arch.ceded;
vcpu              236 arch/powerpc/kvm/trace_hv.h 		__entry->pending_exceptions  = vcpu->arch.pending_exceptions;
vcpu              246 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              247 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu),
vcpu              258 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id = vcpu->vcpu_id;
vcpu              259 arch/powerpc/kvm/trace_hv.h 		__entry->trap	 = vcpu->arch.trap;
vcpu              260 arch/powerpc/kvm/trace_hv.h 		__entry->ceded	 = vcpu->arch.ceded;
vcpu              261 arch/powerpc/kvm/trace_hv.h 		__entry->pc	 = kvmppc_get_pc(vcpu);
vcpu              262 arch/powerpc/kvm/trace_hv.h 		__entry->msr	 = vcpu->arch.shregs.msr;
vcpu              273 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep,
vcpu              277 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
vcpu              291 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              309 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu, unsigned long *hptep, long ret),
vcpu              311 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu, hptep, ret),
vcpu              321 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              333 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              335 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu),
vcpu              347 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              348 arch/powerpc/kvm/trace_hv.h 		__entry->req   = kvmppc_get_gpr(vcpu, 3);
vcpu              349 arch/powerpc/kvm/trace_hv.h 		__entry->gpr4  = kvmppc_get_gpr(vcpu, 4);
vcpu              350 arch/powerpc/kvm/trace_hv.h 		__entry->gpr5  = kvmppc_get_gpr(vcpu, 5);
vcpu              351 arch/powerpc/kvm/trace_hv.h 		__entry->gpr6  = kvmppc_get_gpr(vcpu, 6);
vcpu              352 arch/powerpc/kvm/trace_hv.h 		__entry->gpr7  = kvmppc_get_gpr(vcpu, 7);
vcpu              362 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu, int ret),
vcpu              364 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu, ret),
vcpu              373 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              375 arch/powerpc/kvm/trace_hv.h 		__entry->hcall_rc = kvmppc_get_gpr(vcpu, 3);
vcpu              457 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu),
vcpu              459 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu),
vcpu              467 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              475 arch/powerpc/kvm/trace_hv.h 	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
vcpu              477 arch/powerpc/kvm/trace_hv.h 	TP_ARGS(vcpu, run),
vcpu              486 arch/powerpc/kvm/trace_hv.h 		__entry->vcpu_id  = vcpu->vcpu_id;
vcpu              488 arch/powerpc/kvm/trace_hv.h 		__entry->ret      = vcpu->arch.ret;
vcpu               13 arch/powerpc/kvm/trace_pr.h 	TP_PROTO(int r, struct kvm_vcpu *vcpu),
vcpu               14 arch/powerpc/kvm/trace_pr.h 	TP_ARGS(r, vcpu),
vcpu               23 arch/powerpc/kvm/trace_pr.h 		__entry->pc		= kvmppc_get_pc(vcpu);
vcpu              122 arch/powerpc/kvm/trace_pr.h 	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
vcpu              124 arch/powerpc/kvm/trace_pr.h 	TP_ARGS(type, vcpu, p1, p2),
vcpu              134 arch/powerpc/kvm/trace_pr.h 		__entry->count		= to_book3s(vcpu)->hpte_cache_count;
vcpu              218 arch/powerpc/kvm/trace_pr.h 	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
vcpu              219 arch/powerpc/kvm/trace_pr.h 	TP_ARGS(exit_nr, vcpu),
vcpu              232 arch/powerpc/kvm/trace_pr.h 		__entry->pc		= kvmppc_get_pc(vcpu);
vcpu              233 arch/powerpc/kvm/trace_pr.h 		__entry->dar		= kvmppc_get_fault_dar(vcpu);
vcpu              234 arch/powerpc/kvm/trace_pr.h 		__entry->msr		= kvmppc_get_msr(vcpu);
vcpu              235 arch/powerpc/kvm/trace_pr.h 		__entry->srr1		= vcpu->arch.shadow_srr1;
vcpu              236 arch/powerpc/kvm/trace_pr.h 		__entry->last_inst	= vcpu->arch.last_inst;
vcpu               26 arch/powerpc/perf/hv-24x7-domains.h DOMAIN(VCPU_HOME_CORE, 0x03, vcpu, false)
vcpu               27 arch/powerpc/perf/hv-24x7-domains.h DOMAIN(VCPU_HOME_CHIP, 0x04, vcpu, false)
vcpu               28 arch/powerpc/perf/hv-24x7-domains.h DOMAIN(VCPU_HOME_NODE, 0x05, vcpu, false)
vcpu               29 arch/powerpc/perf/hv-24x7-domains.h DOMAIN(VCPU_REMOTE_NODE, 0x06, vcpu, false)
vcpu              125 arch/powerpc/perf/hv-24x7.c EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
vcpu              621 arch/s390/include/asm/kvm_host.h #define guestdbg_enabled(vcpu) \
vcpu              622 arch/s390/include/asm/kvm_host.h 		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
vcpu              623 arch/s390/include/asm/kvm_host.h #define guestdbg_sstep_enabled(vcpu) \
vcpu              624 arch/s390/include/asm/kvm_host.h 		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu              625 arch/s390/include/asm/kvm_host.h #define guestdbg_hw_bp_enabled(vcpu) \
vcpu              626 arch/s390/include/asm/kvm_host.h 		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
vcpu              627 arch/s390/include/asm/kvm_host.h #define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
vcpu              628 arch/s390/include/asm/kvm_host.h 		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
vcpu              725 arch/s390/include/asm/kvm_host.h 	int (*hook)(struct kvm_vcpu *vcpu);
vcpu              893 arch/s390/include/asm/kvm_host.h bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
vcpu              895 arch/s390/include/asm/kvm_host.h void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
vcpu              898 arch/s390/include/asm/kvm_host.h void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
vcpu              901 arch/s390/include/asm/kvm_host.h void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu              916 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
vcpu              917 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
vcpu              924 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
vcpu              925 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
vcpu              927 arch/s390/include/asm/kvm_host.h void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);
vcpu               21 arch/s390/kvm/diag.c static int diag_release_pages(struct kvm_vcpu *vcpu)
vcpu               24 arch/s390/kvm/diag.c 	unsigned long prefix  = kvm_s390_get_prefix(vcpu);
vcpu               26 arch/s390/kvm/diag.c 	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu               27 arch/s390/kvm/diag.c 	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
vcpu               28 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_10++;
vcpu               32 arch/s390/kvm/diag.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu               34 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
vcpu               41 arch/s390/kvm/diag.c 		gmap_discard(vcpu->arch.gmap, start, end);
vcpu               49 arch/s390/kvm/diag.c 		gmap_discard(vcpu->arch.gmap, start, prefix);
vcpu               51 arch/s390/kvm/diag.c 			gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
vcpu               53 arch/s390/kvm/diag.c 			gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
vcpu               54 arch/s390/kvm/diag.c 		gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
vcpu               59 arch/s390/kvm/diag.c static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
vcpu               73 arch/s390/kvm/diag.c 	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
vcpu               74 arch/s390/kvm/diag.c 	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
vcpu               76 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
vcpu               77 arch/s390/kvm/diag.c 		   vcpu->run->s.regs.gprs[rx]);
vcpu               78 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_258++;
vcpu               79 arch/s390/kvm/diag.c 	if (vcpu->run->s.regs.gprs[rx] & 7)
vcpu               80 arch/s390/kvm/diag.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu               81 arch/s390/kvm/diag.c 	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
vcpu               83 arch/s390/kvm/diag.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu               85 arch/s390/kvm/diag.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu               89 arch/s390/kvm/diag.c 		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
vcpu               92 arch/s390/kvm/diag.c 		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
vcpu               98 arch/s390/kvm/diag.c 			vcpu->run->s.regs.gprs[ry] = 8;
vcpu              104 arch/s390/kvm/diag.c 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              106 arch/s390/kvm/diag.c 		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
vcpu              107 arch/s390/kvm/diag.c 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              109 arch/s390/kvm/diag.c 		vcpu->arch.pfault_token = parm.token_addr;
vcpu              110 arch/s390/kvm/diag.c 		vcpu->arch.pfault_select = parm.select_mask;
vcpu              111 arch/s390/kvm/diag.c 		vcpu->arch.pfault_compare = parm.compare_mask;
vcpu              112 arch/s390/kvm/diag.c 		vcpu->run->s.regs.gprs[ry] = 0;
vcpu              121 arch/s390/kvm/diag.c 		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
vcpu              124 arch/s390/kvm/diag.c 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              126 arch/s390/kvm/diag.c 		vcpu->run->s.regs.gprs[ry] = 0;
vcpu              131 arch/s390/kvm/diag.c 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu              132 arch/s390/kvm/diag.c 			vcpu->run->s.regs.gprs[ry] = 4;
vcpu              134 arch/s390/kvm/diag.c 			vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
vcpu              146 arch/s390/kvm/diag.c static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
vcpu              148 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu              149 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_44++;
vcpu              150 arch/s390/kvm/diag.c 	kvm_vcpu_on_spin(vcpu, true);
vcpu              154 arch/s390/kvm/diag.c static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
vcpu              159 arch/s390/kvm/diag.c 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu              160 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_9c++;
vcpu              161 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
vcpu              163 arch/s390/kvm/diag.c 	if (tid == vcpu->vcpu_id)
vcpu              166 arch/s390/kvm/diag.c 	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
vcpu              172 arch/s390/kvm/diag.c static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
vcpu              174 arch/s390/kvm/diag.c 	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
vcpu              175 arch/s390/kvm/diag.c 	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
vcpu              177 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
vcpu              178 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_308++;
vcpu              181 arch/s390/kvm/diag.c 		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
vcpu              184 arch/s390/kvm/diag.c 		vcpu->run->s390_reset_flags = 0;
vcpu              190 arch/s390/kvm/diag.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
vcpu              191 arch/s390/kvm/diag.c 		kvm_s390_vcpu_stop(vcpu);
vcpu              192 arch/s390/kvm/diag.c 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
vcpu              193 arch/s390/kvm/diag.c 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
vcpu              194 arch/s390/kvm/diag.c 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
vcpu              195 arch/s390/kvm/diag.c 	vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
vcpu              196 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
vcpu              197 arch/s390/kvm/diag.c 	  vcpu->run->s390_reset_flags);
vcpu              198 arch/s390/kvm/diag.c 	trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
vcpu              202 arch/s390/kvm/diag.c static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
vcpu              206 arch/s390/kvm/diag.c 	vcpu->stat.diagnose_500++;
vcpu              208 arch/s390/kvm/diag.c 	if (!vcpu->kvm->arch.css_support ||
vcpu              209 arch/s390/kvm/diag.c 	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
vcpu              212 arch/s390/kvm/diag.c 	VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
vcpu              213 arch/s390/kvm/diag.c 			    (u32) vcpu->run->s.regs.gprs[2],
vcpu              214 arch/s390/kvm/diag.c 			    (u32) vcpu->run->s.regs.gprs[3],
vcpu              215 arch/s390/kvm/diag.c 			    vcpu->run->s.regs.gprs[4]);
vcpu              223 arch/s390/kvm/diag.c 	ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
vcpu              224 arch/s390/kvm/diag.c 				      vcpu->run->s.regs.gprs[2] & 0xffffffff,
vcpu              225 arch/s390/kvm/diag.c 				      8, &vcpu->run->s.regs.gprs[3],
vcpu              226 arch/s390/kvm/diag.c 				      vcpu->run->s.regs.gprs[4]);
vcpu              233 arch/s390/kvm/diag.c 		vcpu->run->s.regs.gprs[2] = ret;
vcpu              238 arch/s390/kvm/diag.c int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
vcpu              240 arch/s390/kvm/diag.c 	int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
vcpu              242 arch/s390/kvm/diag.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              243 arch/s390/kvm/diag.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              245 arch/s390/kvm/diag.c 	trace_kvm_s390_handle_diag(vcpu, code);
vcpu              248 arch/s390/kvm/diag.c 		return diag_release_pages(vcpu);
vcpu              250 arch/s390/kvm/diag.c 		return __diag_time_slice_end(vcpu);
vcpu              252 arch/s390/kvm/diag.c 		return __diag_time_slice_end_directed(vcpu);
vcpu              254 arch/s390/kvm/diag.c 		return __diag_page_ref_service(vcpu);
vcpu              256 arch/s390/kvm/diag.c 		return __diag_ipl_functions(vcpu);
vcpu              258 arch/s390/kvm/diag.c 		return __diag_virtio_hypercall(vcpu);
vcpu              260 arch/s390/kvm/diag.c 		vcpu->stat.diagnose_other++;
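
The diag.c lines above trace kvm_s390_handle_diag(): the DIAGNOSE function code is taken from the base/displacement field, calls issued from the problem state are rejected with a privileged-operation program interrupt, and the rest is a switch over the subcode (0x10 release pages, 0x44 time-slice end, 0x9c directed yield, 0x258 page-reference services, 0x308 IPL functions, 0x500 virtio-ccw notify), with anything else counted in diagnose_other. A minimal user-space sketch of that dispatch shape follows; the subcode values mirror the handlers listed above, while the types and return values are invented for illustration only.

#include <stdio.h>

/* Simplified stand-in for the DIAGNOSE dispatch in diag.c; not kernel code. */
enum {
	DIAG_RELEASE_PAGES      = 0x010,
	DIAG_TIME_SLICE_END     = 0x044,
	DIAG_TIME_SLICE_END_DIR = 0x09c,
	DIAG_PAGE_REF_SERVICE   = 0x258,
	DIAG_IPL_FUNCTIONS      = 0x308,
	DIAG_VIRTIO_HYPERCALL   = 0x500,
};

static int handle_diag_mock(int problem_state, unsigned int code)
{
	if (problem_state)		/* guest PSW in problem state          */
		return -1;		/* real code injects PGM_PRIVILEGED_OP */

	switch (code & 0xffff) {
	case DIAG_RELEASE_PAGES:	return 0; /* diag_release_pages()      */
	case DIAG_TIME_SLICE_END:	return 0; /* __diag_time_slice_end()   */
	case DIAG_TIME_SLICE_END_DIR:	return 0; /* ..._end_directed()        */
	case DIAG_PAGE_REF_SERVICE:	return 0; /* __diag_page_ref_service() */
	case DIAG_IPL_FUNCTIONS:	return 0; /* __diag_ipl_functions()    */
	case DIAG_VIRTIO_HYPERCALL:	return 0; /* __diag_virtio_hypercall() */
	default:			return 1; /* counted as diagnose_other */
	}
}

int main(void)
{
	printf("diag 0x500 from supervisor state -> %d\n",
	       handle_diag_mock(0, DIAG_VIRTIO_HYPERCALL));
	return 0;
}
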
vcpu              264 arch/s390/kvm/gaccess.c int ipte_lock_held(struct kvm_vcpu *vcpu)
vcpu              266 arch/s390/kvm/gaccess.c 	if (vcpu->arch.sie_block->eca & ECA_SII) {
vcpu              269 arch/s390/kvm/gaccess.c 		read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              270 arch/s390/kvm/gaccess.c 		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
vcpu              271 arch/s390/kvm/gaccess.c 		read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              274 arch/s390/kvm/gaccess.c 	return vcpu->kvm->arch.ipte_lock_count != 0;
vcpu              277 arch/s390/kvm/gaccess.c static void ipte_lock_simple(struct kvm_vcpu *vcpu)
vcpu              281 arch/s390/kvm/gaccess.c 	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
vcpu              282 arch/s390/kvm/gaccess.c 	vcpu->kvm->arch.ipte_lock_count++;
vcpu              283 arch/s390/kvm/gaccess.c 	if (vcpu->kvm->arch.ipte_lock_count > 1)
vcpu              286 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              287 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
vcpu              291 arch/s390/kvm/gaccess.c 			read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              298 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              300 arch/s390/kvm/gaccess.c 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
vcpu              303 arch/s390/kvm/gaccess.c static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
vcpu              307 arch/s390/kvm/gaccess.c 	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
vcpu              308 arch/s390/kvm/gaccess.c 	vcpu->kvm->arch.ipte_lock_count--;
vcpu              309 arch/s390/kvm/gaccess.c 	if (vcpu->kvm->arch.ipte_lock_count)
vcpu              311 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              312 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
vcpu              318 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              319 arch/s390/kvm/gaccess.c 	wake_up(&vcpu->kvm->arch.ipte_wq);
vcpu              321 arch/s390/kvm/gaccess.c 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
vcpu              324 arch/s390/kvm/gaccess.c static void ipte_lock_siif(struct kvm_vcpu *vcpu)
vcpu              329 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              330 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
vcpu              334 arch/s390/kvm/gaccess.c 			read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              342 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              345 arch/s390/kvm/gaccess.c static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
vcpu              349 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              350 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
vcpu              358 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              360 arch/s390/kvm/gaccess.c 		wake_up(&vcpu->kvm->arch.ipte_wq);
vcpu              363 arch/s390/kvm/gaccess.c void ipte_lock(struct kvm_vcpu *vcpu)
vcpu              365 arch/s390/kvm/gaccess.c 	if (vcpu->arch.sie_block->eca & ECA_SII)
vcpu              366 arch/s390/kvm/gaccess.c 		ipte_lock_siif(vcpu);
vcpu              368 arch/s390/kvm/gaccess.c 		ipte_lock_simple(vcpu);
vcpu              371 arch/s390/kvm/gaccess.c void ipte_unlock(struct kvm_vcpu *vcpu)
vcpu              373 arch/s390/kvm/gaccess.c 	if (vcpu->arch.sie_block->eca & ECA_SII)
vcpu              374 arch/s390/kvm/gaccess.c 		ipte_unlock_siif(vcpu);
vcpu              376 arch/s390/kvm/gaccess.c 		ipte_unlock_simple(vcpu);
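
The gaccess.c excerpts above show two IPTE locking back ends: when ECA_SII is set in the SIE control block, the lock lives in the shared SCA's ipte control word and is manipulated under kvm->arch.sca_lock (ipte_lock_siif/ipte_unlock_siif); otherwise a host-side mutex plus reference count is used (ipte_lock_simple/ipte_unlock_simple). ipte_lock()/ipte_unlock() merely select between the two. A compilable mock of that selection, with ECA_SII reduced to a placeholder flag and the back ends reduced to comments:

/* Illustration only: models the ipte_lock()/ipte_unlock() selection above. */
#define ECA_SII_MOCK 0x1UL	/* placeholder, not the real ECA bit value */

struct vcpu_mock {
	unsigned long eca;
};

static void ipte_lock_siif_mock(struct vcpu_mock *v)   { (void)v; /* CAS on SCA ipte control */ }
static void ipte_lock_simple_mock(struct vcpu_mock *v) { (void)v; /* mutex + lock counter    */ }

static void ipte_lock_mock(struct vcpu_mock *v)
{
	if (v->eca & ECA_SII_MOCK)
		ipte_lock_siif_mock(v);
	else
		ipte_lock_simple_mock(v);
}
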
vcpu              379 arch/s390/kvm/gaccess.c static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
vcpu              393 arch/s390/kvm/gaccess.c 	save_access_regs(vcpu->run->s.regs.acrs);
vcpu              394 arch/s390/kvm/gaccess.c 	alet.val = vcpu->run->s.regs.acrs[ar];
vcpu              397 arch/s390/kvm/gaccess.c 		asce->val = vcpu->arch.sie_block->gcr[1];
vcpu              400 arch/s390/kvm/gaccess.c 		asce->val = vcpu->arch.sie_block->gcr[7];
vcpu              408 arch/s390/kvm/gaccess.c 		ald_addr = vcpu->arch.sie_block->gcr[5];
vcpu              410 arch/s390/kvm/gaccess.c 		ald_addr = vcpu->arch.sie_block->gcr[2];
vcpu              413 arch/s390/kvm/gaccess.c 	rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald));
vcpu              423 arch/s390/kvm/gaccess.c 	rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale,
vcpu              433 arch/s390/kvm/gaccess.c 	rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste));
vcpu              443 arch/s390/kvm/gaccess.c 		eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff;
vcpu              450 arch/s390/kvm/gaccess.c 			rc = read_guest_real(vcpu, authority_table_addr,
vcpu              493 arch/s390/kvm/gaccess.c static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
vcpu              496 arch/s390/kvm/gaccess.c 	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
vcpu              536 arch/s390/kvm/gaccess.c 		tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
vcpu              554 arch/s390/kvm/gaccess.c static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
vcpu              558 arch/s390/kvm/gaccess.c 	struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
vcpu              571 arch/s390/kvm/gaccess.c 		asce->val = vcpu->arch.sie_block->gcr[1];
vcpu              574 arch/s390/kvm/gaccess.c 		asce->val = vcpu->arch.sie_block->gcr[7];
vcpu              577 arch/s390/kvm/gaccess.c 		asce->val = vcpu->arch.sie_block->gcr[13];
vcpu              580 arch/s390/kvm/gaccess.c 		rc = ar_translation(vcpu, asce, ar, mode);
vcpu              582 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC);
vcpu              614 arch/s390/kvm/gaccess.c static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu              627 arch/s390/kvm/gaccess.c 	ctlreg0.val = vcpu->arch.sie_block->gcr[0];
vcpu              628 arch/s390/kvm/gaccess.c 	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
vcpu              629 arch/s390/kvm/gaccess.c 	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
vcpu              630 arch/s390/kvm/gaccess.c 	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
vcpu              666 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
vcpu              668 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rfte.val))
vcpu              684 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
vcpu              686 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rste.val))
vcpu              702 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
vcpu              704 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rtte.val))
vcpu              730 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
vcpu              732 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &ste.val))
vcpu              750 arch/s390/kvm/gaccess.c 	if (kvm_is_error_gpa(vcpu->kvm, ptr))
vcpu              752 arch/s390/kvm/gaccess.c 	if (deref_table(vcpu->kvm, ptr, &pte.val))
vcpu              762 arch/s390/kvm/gaccess.c 	raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
vcpu              772 arch/s390/kvm/gaccess.c 	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
vcpu              784 arch/s390/kvm/gaccess.c static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
vcpu              787 arch/s390/kvm/gaccess.c 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
vcpu              788 arch/s390/kvm/gaccess.c 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
vcpu              797 arch/s390/kvm/gaccess.c static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
vcpu              801 arch/s390/kvm/gaccess.c 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
vcpu              805 arch/s390/kvm/gaccess.c 	lap_enabled = low_address_protection_enabled(vcpu, asce);
vcpu              807 arch/s390/kvm/gaccess.c 		ga = kvm_s390_logical_to_effective(vcpu, ga);
vcpu              809 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
vcpu              813 arch/s390/kvm/gaccess.c 			rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
vcpu              817 arch/s390/kvm/gaccess.c 			*pages = kvm_s390_real_to_abs(vcpu, ga);
vcpu              818 arch/s390/kvm/gaccess.c 			if (kvm_is_error_gpa(vcpu->kvm, *pages))
vcpu              822 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, rc, ga, ar, mode, prot);
vcpu              830 arch/s390/kvm/gaccess.c int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
vcpu              833 arch/s390/kvm/gaccess.c 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
vcpu              843 arch/s390/kvm/gaccess.c 	ga = kvm_s390_logical_to_effective(vcpu, ga);
vcpu              844 arch/s390/kvm/gaccess.c 	rc = get_vcpu_asce(vcpu, &asce, ga, ar, mode);
vcpu              855 arch/s390/kvm/gaccess.c 		ipte_lock(vcpu);
vcpu              856 arch/s390/kvm/gaccess.c 	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
vcpu              861 arch/s390/kvm/gaccess.c 			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
vcpu              863 arch/s390/kvm/gaccess.c 			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
vcpu              869 arch/s390/kvm/gaccess.c 		ipte_unlock(vcpu);
vcpu              875 arch/s390/kvm/gaccess.c int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
vcpu              882 arch/s390/kvm/gaccess.c 		gpa = kvm_s390_real_to_abs(vcpu, gra);
vcpu              885 arch/s390/kvm/gaccess.c 			rc = write_guest_abs(vcpu, gpa, data, _len);
vcpu              887 arch/s390/kvm/gaccess.c 			rc = read_guest_abs(vcpu, gpa, data, _len);
vcpu              904 arch/s390/kvm/gaccess.c int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
vcpu              907 arch/s390/kvm/gaccess.c 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
vcpu              912 arch/s390/kvm/gaccess.c 	gva = kvm_s390_logical_to_effective(vcpu, gva);
vcpu              913 arch/s390/kvm/gaccess.c 	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
vcpu              916 arch/s390/kvm/gaccess.c 	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
vcpu              918 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
vcpu              923 arch/s390/kvm/gaccess.c 		rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
vcpu              925 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, rc, gva, 0, mode, prot);
vcpu              927 arch/s390/kvm/gaccess.c 		*gpa = kvm_s390_real_to_abs(vcpu, gva);
vcpu              928 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
vcpu              929 arch/s390/kvm/gaccess.c 			return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
vcpu              938 arch/s390/kvm/gaccess.c int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
vcpu              945 arch/s390/kvm/gaccess.c 	ipte_lock(vcpu);
vcpu              948 arch/s390/kvm/gaccess.c 		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
vcpu              952 arch/s390/kvm/gaccess.c 	ipte_unlock(vcpu);
vcpu              966 arch/s390/kvm/gaccess.c int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
vcpu              968 arch/s390/kvm/gaccess.c 	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};
vcpu              972 arch/s390/kvm/gaccess.c 	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
vcpu             1164 arch/s390/kvm/gaccess.c int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
vcpu             1179 arch/s390/kvm/gaccess.c 	ipte_lock(vcpu);
vcpu             1201 arch/s390/kvm/gaccess.c 	ipte_unlock(vcpu);
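
Taken together, the access_guest()/access_guest_real() lines above outline the copy path: logical addresses are first turned into effective addresses, an ASCE is selected via get_vcpu_asce(), guest_page_range() (under the IPTE lock for non-real accesses) yields one absolute page address per page touched, and the data is then moved chunk by chunk with kvm_write_guest()/kvm_read_guest(). A standalone sketch of the per-page chunking only, with the translation and copy steps reduced to comments and all names invented:

#include <stddef.h>

#define PAGE_SIZE_MOCK 4096UL	/* s390 uses 4 KiB pages */

/* Illustration of the chunk loop in access_guest(); the real translation
 * (guest_page_range) and copy (kvm_read/write_guest) are only hinted at. */
static int copy_guest_mock(unsigned long ga, void *data, size_t len, int store)
{
	(void)store;

	while (len) {
		size_t chunk = PAGE_SIZE_MOCK - (ga & (PAGE_SIZE_MOCK - 1));

		if (chunk > len)
			chunk = len;
		/* real code: gpa from the translated page list, then
		 *            kvm_write_guest()/kvm_read_guest(vcpu->kvm, gpa, ...) */
		ga += chunk;
		data = (char *)data + chunk;
		len -= chunk;
	}
	return 0;
}
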
vcpu               27 arch/s390/kvm/gaccess.h static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
vcpu               30 arch/s390/kvm/gaccess.h 	unsigned long prefix  = kvm_s390_get_prefix(vcpu);
vcpu               52 arch/s390/kvm/gaccess.h static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
vcpu               55 arch/s390/kvm/gaccess.h 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
vcpu               92 arch/s390/kvm/gaccess.h #define put_guest_lc(vcpu, x, gra)				\
vcpu               94 arch/s390/kvm/gaccess.h 	struct kvm_vcpu *__vcpu = (vcpu);			\
vcpu              121 arch/s390/kvm/gaccess.h int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
vcpu              124 arch/s390/kvm/gaccess.h 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
vcpu              126 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
vcpu              147 arch/s390/kvm/gaccess.h int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
vcpu              150 arch/s390/kvm/gaccess.h 	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);
vcpu              152 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
vcpu              161 arch/s390/kvm/gaccess.h int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu              163 arch/s390/kvm/gaccess.h int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
vcpu              166 arch/s390/kvm/gaccess.h int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
vcpu              169 arch/s390/kvm/gaccess.h int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
vcpu              218 arch/s390/kvm/gaccess.h int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
vcpu              221 arch/s390/kvm/gaccess.h 	return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
vcpu              238 arch/s390/kvm/gaccess.h int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
vcpu              241 arch/s390/kvm/gaccess.h 	return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
vcpu              259 arch/s390/kvm/gaccess.h int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
vcpu              262 arch/s390/kvm/gaccess.h 	return access_guest(vcpu, ga, 0, data, len, GACC_IFETCH);
vcpu              282 arch/s390/kvm/gaccess.h int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
vcpu              285 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
vcpu              305 arch/s390/kvm/gaccess.h int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
vcpu              308 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
vcpu              328 arch/s390/kvm/gaccess.h int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
vcpu              331 arch/s390/kvm/gaccess.h 	return access_guest_real(vcpu, gra, data, len, 1);
vcpu              351 arch/s390/kvm/gaccess.h int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
vcpu              354 arch/s390/kvm/gaccess.h 	return access_guest_real(vcpu, gra, data, len, 0);
vcpu              357 arch/s390/kvm/gaccess.h void ipte_lock(struct kvm_vcpu *vcpu);
vcpu              358 arch/s390/kvm/gaccess.h void ipte_unlock(struct kvm_vcpu *vcpu);
vcpu              359 arch/s390/kvm/gaccess.h int ipte_lock_held(struct kvm_vcpu *vcpu);
vcpu              360 arch/s390/kvm/gaccess.h int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
vcpu              362 arch/s390/kvm/gaccess.h int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
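
The gaccess.h lines above are thin wrappers: write_guest()/read_guest()/read_guest_instr() forward to access_guest() with GACC_STORE, GACC_FETCH or GACC_IFETCH, the *_abs variants go straight to kvm_write_guest()/kvm_read_guest(), and the *_lc variants add the prefix first. A typical call site therefore looks like the hedged fragment below; kvm_s390_inject_prog_cond() is the usual error-forwarding helper elsewhere in kvm-s390 and is shown here as an assumption rather than a quote from the listing.

	/* hypothetical instruction-handler body, not taken from the listing */
	u64 operand;
	int rc;

	rc = read_guest(vcpu, operand_addr, ar, &operand, sizeof(operand));
	if (rc)		/* forward access/translation exceptions to the guest */
		return kvm_s390_inject_prog_cond(vcpu, rc);
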
vcpu               59 arch/s390/kvm/guestdbg.c static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
vcpu               62 arch/s390/kvm/guestdbg.c 	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
vcpu               63 arch/s390/kvm/guestdbg.c 	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
vcpu               64 arch/s390/kvm/guestdbg.c 	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
vcpu               67 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
vcpu               68 arch/s390/kvm/guestdbg.c 	    vcpu->arch.guestdbg.hw_bp_info == NULL)
vcpu               79 arch/s390/kvm/guestdbg.c 	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
vcpu               80 arch/s390/kvm/guestdbg.c 		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
vcpu               81 arch/s390/kvm/guestdbg.c 		len = vcpu->arch.guestdbg.hw_bp_info[i].len;
vcpu               99 arch/s390/kvm/guestdbg.c static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
vcpu              102 arch/s390/kvm/guestdbg.c 	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
vcpu              103 arch/s390/kvm/guestdbg.c 	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
vcpu              104 arch/s390/kvm/guestdbg.c 	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
vcpu              107 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
vcpu              108 arch/s390/kvm/guestdbg.c 	    vcpu->arch.guestdbg.hw_wp_info == NULL)
vcpu              121 arch/s390/kvm/guestdbg.c 		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
vcpu              122 arch/s390/kvm/guestdbg.c 			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
vcpu              123 arch/s390/kvm/guestdbg.c 			len = vcpu->arch.guestdbg.hw_wp_info[i].len;
vcpu              130 arch/s390/kvm/guestdbg.c void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
vcpu              132 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
vcpu              133 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
vcpu              134 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
vcpu              135 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
vcpu              138 arch/s390/kvm/guestdbg.c void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
vcpu              140 arch/s390/kvm/guestdbg.c 	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
vcpu              141 arch/s390/kvm/guestdbg.c 	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
vcpu              142 arch/s390/kvm/guestdbg.c 	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
vcpu              143 arch/s390/kvm/guestdbg.c 	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
vcpu              146 arch/s390/kvm/guestdbg.c void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
vcpu              154 arch/s390/kvm/guestdbg.c 	if (guestdbg_sstep_enabled(vcpu)) {
vcpu              156 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
vcpu              157 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
vcpu              158 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->gcr[10] = 0;
vcpu              159 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->gcr[11] = -1UL;
vcpu              162 arch/s390/kvm/guestdbg.c 	if (guestdbg_hw_bp_enabled(vcpu)) {
vcpu              163 arch/s390/kvm/guestdbg.c 		enable_all_hw_bp(vcpu);
vcpu              164 arch/s390/kvm/guestdbg.c 		enable_all_hw_wp(vcpu);
vcpu              168 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
vcpu              169 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
vcpu              174 arch/s390/kvm/guestdbg.c static int __import_wp_info(struct kvm_vcpu *vcpu,
vcpu              191 arch/s390/kvm/guestdbg.c 	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
vcpu              203 arch/s390/kvm/guestdbg.c int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
vcpu              256 arch/s390/kvm/guestdbg.c 			ret = __import_wp_info(vcpu, &bp_data[i],
vcpu              270 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
vcpu              271 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.hw_bp_info = bp_info;
vcpu              272 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
vcpu              273 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.hw_wp_info = wp_info;
vcpu              282 arch/s390/kvm/guestdbg.c void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
vcpu              287 arch/s390/kvm/guestdbg.c 	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
vcpu              288 arch/s390/kvm/guestdbg.c 		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
vcpu              292 arch/s390/kvm/guestdbg.c 	kfree(vcpu->arch.guestdbg.hw_wp_info);
vcpu              293 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.hw_wp_info = NULL;
vcpu              295 arch/s390/kvm/guestdbg.c 	kfree(vcpu->arch.guestdbg.hw_bp_info);
vcpu              296 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.hw_bp_info = NULL;
vcpu              298 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.nr_hw_wp = 0;
vcpu              299 arch/s390/kvm/guestdbg.c 	vcpu->arch.guestdbg.nr_hw_bp = 0;
vcpu              313 arch/s390/kvm/guestdbg.c static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
vcpu              316 arch/s390/kvm/guestdbg.c 	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
vcpu              319 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
vcpu              322 arch/s390/kvm/guestdbg.c 	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
vcpu              338 arch/s390/kvm/guestdbg.c static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
vcpu              344 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
vcpu              347 arch/s390/kvm/guestdbg.c 	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
vcpu              348 arch/s390/kvm/guestdbg.c 		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
vcpu              357 arch/s390/kvm/guestdbg.c 		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
vcpu              371 arch/s390/kvm/guestdbg.c void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
vcpu              373 arch/s390/kvm/guestdbg.c 	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
vcpu              374 arch/s390/kvm/guestdbg.c 	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
vcpu              388 arch/s390/kvm/guestdbg.c static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
vcpu              391 arch/s390/kvm/guestdbg.c 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
vcpu              394 arch/s390/kvm/guestdbg.c 	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
vcpu              396 arch/s390/kvm/guestdbg.c 	if (guestdbg_hw_bp_enabled(vcpu)) {
vcpu              398 arch/s390/kvm/guestdbg.c 		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
vcpu              399 arch/s390/kvm/guestdbg.c 			wp_info = any_wp_changed(vcpu);
vcpu              407 arch/s390/kvm/guestdbg.c 			 vcpu->arch.guestdbg.nr_hw_bp > 0) {
vcpu              408 arch/s390/kvm/guestdbg.c 			bp_info = find_hw_bp(vcpu, addr);
vcpu              413 arch/s390/kvm/guestdbg.c 				vcpu->arch.guestdbg.last_bp = addr;
vcpu              417 arch/s390/kvm/guestdbg.c 			bp_info = find_hw_bp(vcpu, peraddr);
vcpu              418 arch/s390/kvm/guestdbg.c 			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
vcpu              425 arch/s390/kvm/guestdbg.c 	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
vcpu              436 arch/s390/kvm/guestdbg.c static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
vcpu              442 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
vcpu              444 arch/s390/kvm/guestdbg.c 		*addr = vcpu->arch.sie_block->peraddr;
vcpu              450 arch/s390/kvm/guestdbg.c 		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
vcpu              459 arch/s390/kvm/guestdbg.c 		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
vcpu              460 arch/s390/kvm/guestdbg.c 				     kvm_s390_get_ilen(vcpu));
vcpu              461 arch/s390/kvm/guestdbg.c 		if (vcpu->arch.sie_block->icptstatus & 0x01) {
vcpu              462 arch/s390/kvm/guestdbg.c 			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
vcpu              470 arch/s390/kvm/guestdbg.c 		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
vcpu              485 arch/s390/kvm/guestdbg.c 			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
vcpu              486 arch/s390/kvm/guestdbg.c 			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
vcpu              489 arch/s390/kvm/guestdbg.c 		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
vcpu              494 arch/s390/kvm/guestdbg.c #define guest_per_enabled(vcpu) \
vcpu              495 arch/s390/kvm/guestdbg.c 			     (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
vcpu              497 arch/s390/kvm/guestdbg.c int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
vcpu              499 arch/s390/kvm/guestdbg.c 	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
vcpu              500 arch/s390/kvm/guestdbg.c 	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
vcpu              501 arch/s390/kvm/guestdbg.c 	const u8 ilen = kvm_s390_get_ilen(vcpu);
vcpu              505 arch/s390/kvm/guestdbg.c 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
vcpu              515 arch/s390/kvm/guestdbg.c 	if (!guestdbg_enabled(vcpu))
vcpu              516 arch/s390/kvm/guestdbg.c 		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
vcpu              518 arch/s390/kvm/guestdbg.c 	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
vcpu              519 arch/s390/kvm/guestdbg.c 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
vcpu              521 arch/s390/kvm/guestdbg.c 	if (!guest_per_enabled(vcpu) ||
vcpu              522 arch/s390/kvm/guestdbg.c 	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
vcpu              525 arch/s390/kvm/guestdbg.c 	rc = per_fetched_addr(vcpu, &fetched_addr);
vcpu              530 arch/s390/kvm/guestdbg.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              533 arch/s390/kvm/guestdbg.c 		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
vcpu              537 arch/s390/kvm/guestdbg.c static int filter_guest_per_event(struct kvm_vcpu *vcpu)
vcpu              539 arch/s390/kvm/guestdbg.c 	const u8 perc = vcpu->arch.sie_block->perc;
vcpu              540 arch/s390/kvm/guestdbg.c 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
vcpu              541 arch/s390/kvm/guestdbg.c 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
vcpu              542 arch/s390/kvm/guestdbg.c 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
vcpu              543 arch/s390/kvm/guestdbg.c 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
vcpu              549 arch/s390/kvm/guestdbg.c 	if (!guest_per_enabled(vcpu))
vcpu              560 arch/s390/kvm/guestdbg.c 		rc = per_fetched_addr(vcpu, &fetched_addr);
vcpu              574 arch/s390/kvm/guestdbg.c 	vcpu->arch.sie_block->perc = guest_perc;
vcpu              577 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
vcpu              581 arch/s390/kvm/guestdbg.c #define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
vcpu              582 arch/s390/kvm/guestdbg.c #define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
vcpu              583 arch/s390/kvm/guestdbg.c #define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
vcpu              584 arch/s390/kvm/guestdbg.c #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
vcpu              586 arch/s390/kvm/guestdbg.c int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
vcpu              590 arch/s390/kvm/guestdbg.c 	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
vcpu              591 arch/s390/kvm/guestdbg.c 				vcpu->arch.sie_block->peraddr))
vcpu              592 arch/s390/kvm/guestdbg.c 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
vcpu              594 arch/s390/kvm/guestdbg.c 	rc = filter_guest_per_event(vcpu);
vcpu              604 arch/s390/kvm/guestdbg.c 	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
vcpu              605 arch/s390/kvm/guestdbg.c 		vcpu->arch.sie_block->iprcc = 0;
vcpu              606 arch/s390/kvm/guestdbg.c 		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
vcpu              613 arch/s390/kvm/guestdbg.c 		if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
vcpu              614 arch/s390/kvm/guestdbg.c 		    (pssec(vcpu) || hssec(vcpu)))
vcpu              615 arch/s390/kvm/guestdbg.c 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
vcpu              621 arch/s390/kvm/guestdbg.c 		if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
vcpu              622 arch/s390/kvm/guestdbg.c 		    (pssec(vcpu) || old_ssec(vcpu)))
vcpu              623 arch/s390/kvm/guestdbg.c 			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
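
The guestdbg.c excerpts show the PER plumbing: kvm_s390_backup/restore_guest_per_regs() preserve CR0 and CR9-CR11 around host-initiated debugging, while kvm_s390_patch_guest_per_regs() forces an instruction-fetch PER event over the entire address range for single-stepping and programs CR10/CR11 ranges for hardware breakpoints and watchpoints. A reduced, compilable mock of the single-step setup; the register layout and the bit value are placeholders, not the real definitions:

#include <stdint.h>

#define PER_EVENT_IFETCH_MOCK	(1ULL << 34)	/* placeholder bit, not the real mask */

struct per_regs_mock {
	uint64_t cr9, cr10, cr11;
};

static void patch_for_sstep_mock(struct per_regs_mock *r)
{
	r->cr9  |= PER_EVENT_IFETCH_MOCK;	/* report every instruction fetch  */
	r->cr10  = 0;				/* PER range start: lowest address */
	r->cr11  = ~0ULL;			/* PER range end: highest address  */
}
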
vcpu               25 arch/s390/kvm/intercept.c u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
vcpu               27 arch/s390/kvm/intercept.c 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
vcpu               30 arch/s390/kvm/intercept.c 	switch (vcpu->arch.sie_block->icptcode) {
vcpu               37 arch/s390/kvm/intercept.c 		ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
vcpu               47 arch/s390/kvm/intercept.c 		ilen = vcpu->arch.sie_block->pgmilc & 0x6;
vcpu               53 arch/s390/kvm/intercept.c static int handle_stop(struct kvm_vcpu *vcpu)
vcpu               55 arch/s390/kvm/intercept.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu               59 arch/s390/kvm/intercept.c 	vcpu->stat.exit_stop_request++;
vcpu               62 arch/s390/kvm/intercept.c 	if (kvm_s390_vcpu_has_irq(vcpu, 1))
vcpu               68 arch/s390/kvm/intercept.c 	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
vcpu               76 arch/s390/kvm/intercept.c 		rc = kvm_s390_vcpu_store_status(vcpu,
vcpu               82 arch/s390/kvm/intercept.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
vcpu               83 arch/s390/kvm/intercept.c 		kvm_s390_vcpu_stop(vcpu);
vcpu               87 arch/s390/kvm/intercept.c static int handle_validity(struct kvm_vcpu *vcpu)
vcpu               89 arch/s390/kvm/intercept.c 	int viwhy = vcpu->arch.sie_block->ipb >> 16;
vcpu               91 arch/s390/kvm/intercept.c 	vcpu->stat.exit_validity++;
vcpu               92 arch/s390/kvm/intercept.c 	trace_kvm_s390_intercept_validity(vcpu, viwhy);
vcpu               94 arch/s390/kvm/intercept.c 		  current->pid, vcpu->kvm);
vcpu              102 arch/s390/kvm/intercept.c static int handle_instruction(struct kvm_vcpu *vcpu)
vcpu              104 arch/s390/kvm/intercept.c 	vcpu->stat.exit_instruction++;
vcpu              105 arch/s390/kvm/intercept.c 	trace_kvm_s390_intercept_instruction(vcpu,
vcpu              106 arch/s390/kvm/intercept.c 					     vcpu->arch.sie_block->ipa,
vcpu              107 arch/s390/kvm/intercept.c 					     vcpu->arch.sie_block->ipb);
vcpu              109 arch/s390/kvm/intercept.c 	switch (vcpu->arch.sie_block->ipa >> 8) {
vcpu              111 arch/s390/kvm/intercept.c 		return kvm_s390_handle_01(vcpu);
vcpu              113 arch/s390/kvm/intercept.c 		return kvm_s390_handle_lpsw(vcpu);
vcpu              115 arch/s390/kvm/intercept.c 		return kvm_s390_handle_diag(vcpu);
vcpu              117 arch/s390/kvm/intercept.c 		return kvm_s390_handle_aa(vcpu);
vcpu              119 arch/s390/kvm/intercept.c 		return kvm_s390_handle_sigp(vcpu);
vcpu              121 arch/s390/kvm/intercept.c 		return kvm_s390_handle_b2(vcpu);
vcpu              123 arch/s390/kvm/intercept.c 		return kvm_s390_handle_stctl(vcpu);
vcpu              125 arch/s390/kvm/intercept.c 		return kvm_s390_handle_lctl(vcpu);
vcpu              127 arch/s390/kvm/intercept.c 		return kvm_s390_handle_b9(vcpu);
vcpu              129 arch/s390/kvm/intercept.c 		return kvm_s390_handle_e3(vcpu);
vcpu              131 arch/s390/kvm/intercept.c 		return kvm_s390_handle_e5(vcpu);
vcpu              133 arch/s390/kvm/intercept.c 		return kvm_s390_handle_eb(vcpu);
vcpu              139 arch/s390/kvm/intercept.c static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
vcpu              142 arch/s390/kvm/intercept.c 		.code = vcpu->arch.sie_block->iprcc,
vcpu              147 arch/s390/kvm/intercept.c 	switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
vcpu              158 arch/s390/kvm/intercept.c 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
vcpu              166 arch/s390/kvm/intercept.c 		pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
vcpu              174 arch/s390/kvm/intercept.c 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
vcpu              175 arch/s390/kvm/intercept.c 		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
vcpu              176 arch/s390/kvm/intercept.c 		pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
vcpu              179 arch/s390/kvm/intercept.c 		pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
vcpu              180 arch/s390/kvm/intercept.c 		pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
vcpu              184 arch/s390/kvm/intercept.c 		pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
vcpu              187 arch/s390/kvm/intercept.c 		pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
vcpu              188 arch/s390/kvm/intercept.c 		pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
vcpu              194 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->iprcc & PGM_PER) {
vcpu              195 arch/s390/kvm/intercept.c 		pgm_info.per_code = vcpu->arch.sie_block->perc;
vcpu              196 arch/s390/kvm/intercept.c 		pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
vcpu              197 arch/s390/kvm/intercept.c 		pgm_info.per_address = vcpu->arch.sie_block->peraddr;
vcpu              198 arch/s390/kvm/intercept.c 		pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
vcpu              200 arch/s390/kvm/intercept.c 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
vcpu              207 arch/s390/kvm/intercept.c static int handle_itdb(struct kvm_vcpu *vcpu)
vcpu              212 arch/s390/kvm/intercept.c 	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
vcpu              216 arch/s390/kvm/intercept.c 	itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
vcpu              217 arch/s390/kvm/intercept.c 	rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
vcpu              225 arch/s390/kvm/intercept.c #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
vcpu              227 arch/s390/kvm/intercept.c static int handle_prog(struct kvm_vcpu *vcpu)
vcpu              232 arch/s390/kvm/intercept.c 	vcpu->stat.exit_program_interruption++;
vcpu              234 arch/s390/kvm/intercept.c 	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
vcpu              235 arch/s390/kvm/intercept.c 		rc = kvm_s390_handle_per_event(vcpu);
vcpu              239 arch/s390/kvm/intercept.c 		if (vcpu->arch.sie_block->iprcc == 0)
vcpu              243 arch/s390/kvm/intercept.c 	trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
vcpu              244 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
vcpu              245 arch/s390/kvm/intercept.c 		rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
vcpu              252 arch/s390/kvm/intercept.c 	rc = handle_itdb(vcpu);
vcpu              256 arch/s390/kvm/intercept.c 	return inject_prog_on_prog_intercept(vcpu);
vcpu              267 arch/s390/kvm/intercept.c static int handle_external_interrupt(struct kvm_vcpu *vcpu)
vcpu              269 arch/s390/kvm/intercept.c 	u16 eic = vcpu->arch.sie_block->eic;
vcpu              274 arch/s390/kvm/intercept.c 	vcpu->stat.exit_external_interrupt++;
vcpu              276 arch/s390/kvm/intercept.c 	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
vcpu              293 arch/s390/kvm/intercept.c 		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
vcpu              294 arch/s390/kvm/intercept.c 		rc = kvm_s390_inject_vcpu(vcpu, &irq);
vcpu              303 arch/s390/kvm/intercept.c 	return kvm_s390_inject_vcpu(vcpu, &irq);
vcpu              314 arch/s390/kvm/intercept.c static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
vcpu              319 arch/s390/kvm/intercept.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              322 arch/s390/kvm/intercept.c 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
vcpu              325 arch/s390/kvm/intercept.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              326 arch/s390/kvm/intercept.c 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
vcpu              331 arch/s390/kvm/intercept.c 	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
vcpu              334 arch/s390/kvm/intercept.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              335 arch/s390/kvm/intercept.c 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
vcpu              339 arch/s390/kvm/intercept.c 	kvm_s390_retry_instr(vcpu);
vcpu              344 arch/s390/kvm/intercept.c static int handle_partial_execution(struct kvm_vcpu *vcpu)
vcpu              346 arch/s390/kvm/intercept.c 	vcpu->stat.exit_pei++;
vcpu              348 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->ipa == 0xb254)	/* MVPG */
vcpu              349 arch/s390/kvm/intercept.c 		return handle_mvpg_pei(vcpu);
vcpu              350 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->ipa >> 8 == 0xae)	/* SIGP */
vcpu              351 arch/s390/kvm/intercept.c 		return kvm_s390_handle_sigp_pei(vcpu);
vcpu              361 arch/s390/kvm/intercept.c int handle_sthyi(struct kvm_vcpu *vcpu)
vcpu              367 arch/s390/kvm/intercept.c 	if (!test_kvm_facility(vcpu->kvm, 74))
vcpu              368 arch/s390/kvm/intercept.c 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu              370 arch/s390/kvm/intercept.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              371 arch/s390/kvm/intercept.c 	code = vcpu->run->s.regs.gprs[reg1];
vcpu              372 arch/s390/kvm/intercept.c 	addr = vcpu->run->s.regs.gprs[reg2];
vcpu              374 arch/s390/kvm/intercept.c 	vcpu->stat.instruction_sthyi++;
vcpu              375 arch/s390/kvm/intercept.c 	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
vcpu              376 arch/s390/kvm/intercept.c 	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
vcpu              379 arch/s390/kvm/intercept.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              388 arch/s390/kvm/intercept.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              398 arch/s390/kvm/intercept.c 		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
vcpu              401 arch/s390/kvm/intercept.c 			return kvm_s390_inject_prog_cond(vcpu, r);
vcpu              406 arch/s390/kvm/intercept.c 	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
vcpu              407 arch/s390/kvm/intercept.c 	kvm_s390_set_psw_cc(vcpu, cc);
vcpu              411 arch/s390/kvm/intercept.c static int handle_operexc(struct kvm_vcpu *vcpu)
vcpu              416 arch/s390/kvm/intercept.c 	vcpu->stat.exit_operation_exception++;
vcpu              417 arch/s390/kvm/intercept.c 	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
vcpu              418 arch/s390/kvm/intercept.c 				      vcpu->arch.sie_block->ipb);
vcpu              420 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->ipa == 0xb256)
vcpu              421 arch/s390/kvm/intercept.c 		return handle_sthyi(vcpu);
vcpu              423 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
vcpu              425 arch/s390/kvm/intercept.c 	rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
vcpu              436 arch/s390/kvm/intercept.c 	oldpsw = vcpu->arch.sie_block->gpsw;
vcpu              444 arch/s390/kvm/intercept.c 	return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu              447 arch/s390/kvm/intercept.c int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
vcpu              451 arch/s390/kvm/intercept.c 	if (kvm_is_ucontrol(vcpu->kvm))
vcpu              454 arch/s390/kvm/intercept.c 	switch (vcpu->arch.sie_block->icptcode) {
vcpu              456 arch/s390/kvm/intercept.c 		vcpu->stat.exit_external_request++;
vcpu              459 arch/s390/kvm/intercept.c 		vcpu->stat.exit_io_request++;
vcpu              462 arch/s390/kvm/intercept.c 		rc = handle_instruction(vcpu);
vcpu              465 arch/s390/kvm/intercept.c 		return handle_prog(vcpu);
vcpu              467 arch/s390/kvm/intercept.c 		return handle_external_interrupt(vcpu);
vcpu              469 arch/s390/kvm/intercept.c 		return kvm_s390_handle_wait(vcpu);
vcpu              471 arch/s390/kvm/intercept.c 		return handle_validity(vcpu);
vcpu              473 arch/s390/kvm/intercept.c 		return handle_stop(vcpu);
vcpu              475 arch/s390/kvm/intercept.c 		rc = handle_operexc(vcpu);
vcpu              478 arch/s390/kvm/intercept.c 		rc = handle_partial_execution(vcpu);
vcpu              481 arch/s390/kvm/intercept.c 		rc = kvm_s390_skey_check_enable(vcpu);
vcpu              488 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->icptstatus & 0x02 &&
vcpu              490 arch/s390/kvm/intercept.c 		per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
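
intercept.c resolves exits in two stages: kvm_handle_sie_intercept() switches on the SIE interception code (external request, I/O request, instruction, program, wait, validity, stop, operation exception, partial execution), and handle_instruction() then switches on the high byte of the IPA to reach the opcode-specific handlers (0x83 DIAGNOSE, 0xae SIGP, 0xb2, 0xb9, 0xe3, 0xe5, 0xeb and so on). A compact sketch of the second stage, with placeholder return values and the real handlers named only in comments:

/* Illustration of the IPA-based dispatch in handle_instruction(). */
static int handle_instruction_mock(unsigned short ipa)
{
	switch (ipa >> 8) {
	case 0x83: return 0;	/* kvm_s390_handle_diag() */
	case 0xae: return 0;	/* kvm_s390_handle_sigp() */
	case 0xb2: return 0;	/* kvm_s390_handle_b2()   */
	case 0xb9: return 0;	/* kvm_s390_handle_b9()   */
	case 0xeb: return 0;	/* kvm_s390_handle_eb()   */
	default:   return -1;	/* -EOPNOTSUPP in the real code */
	}
}
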
vcpu               42 arch/s390/kvm/interrupt.c static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
vcpu               46 arch/s390/kvm/interrupt.c 	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
vcpu               50 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu               51 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
vcpu               52 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
vcpu               54 arch/s390/kvm/interrupt.c 			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
vcpu               59 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu               61 arch/s390/kvm/interrupt.c 			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
vcpu               66 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu               74 arch/s390/kvm/interrupt.c static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
vcpu               79 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu               80 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
vcpu               81 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
vcpu               83 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
vcpu               93 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu               95 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
vcpu              105 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              111 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
vcpu              115 arch/s390/kvm/interrupt.c static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
vcpu              121 arch/s390/kvm/interrupt.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
vcpu              122 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu              123 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
vcpu              124 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
vcpu              126 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
vcpu              132 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu              134 arch/s390/kvm/interrupt.c 			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
vcpu              140 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu              144 arch/s390/kvm/interrupt.c int psw_extint_disabled(struct kvm_vcpu *vcpu)
vcpu              146 arch/s390/kvm/interrupt.c 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
vcpu              149 arch/s390/kvm/interrupt.c static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
vcpu              151 arch/s390/kvm/interrupt.c 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
vcpu              154 arch/s390/kvm/interrupt.c static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
vcpu              156 arch/s390/kvm/interrupt.c 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
vcpu              159 arch/s390/kvm/interrupt.c static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
vcpu              161 arch/s390/kvm/interrupt.c 	return psw_extint_disabled(vcpu) &&
vcpu              162 arch/s390/kvm/interrupt.c 	       psw_ioint_disabled(vcpu) &&
vcpu              163 arch/s390/kvm/interrupt.c 	       psw_mchk_disabled(vcpu);
vcpu              166 arch/s390/kvm/interrupt.c static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
vcpu              168 arch/s390/kvm/interrupt.c 	if (psw_extint_disabled(vcpu) ||
vcpu              169 arch/s390/kvm/interrupt.c 	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
vcpu              171 arch/s390/kvm/interrupt.c 	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
vcpu              177 arch/s390/kvm/interrupt.c static int ckc_irq_pending(struct kvm_vcpu *vcpu)
vcpu              179 arch/s390/kvm/interrupt.c 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
vcpu              180 arch/s390/kvm/interrupt.c 	const u64 ckc = vcpu->arch.sie_block->ckc;
vcpu              182 arch/s390/kvm/interrupt.c 	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
vcpu              188 arch/s390/kvm/interrupt.c 	return ckc_interrupts_enabled(vcpu);
vcpu              191 arch/s390/kvm/interrupt.c static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
vcpu              193 arch/s390/kvm/interrupt.c 	return !psw_extint_disabled(vcpu) &&
vcpu              194 arch/s390/kvm/interrupt.c 	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
vcpu              197 arch/s390/kvm/interrupt.c static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
vcpu              199 arch/s390/kvm/interrupt.c 	if (!cpu_timer_interrupts_enabled(vcpu))
vcpu              201 arch/s390/kvm/interrupt.c 	return kvm_s390_get_cpu_timer(vcpu) >> 63;
vcpu              325 arch/s390/kvm/interrupt.c static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
vcpu              327 arch/s390/kvm/interrupt.c 	return vcpu->kvm->arch.float_int.pending_irqs |
vcpu              328 arch/s390/kvm/interrupt.c 		vcpu->arch.local_int.pending_irqs;
vcpu              331 arch/s390/kvm/interrupt.c static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
vcpu              333 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
vcpu              336 arch/s390/kvm/interrupt.c 	pending_mask = pending_irqs_no_gisa(vcpu);
vcpu              352 arch/s390/kvm/interrupt.c static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
vcpu              358 arch/s390/kvm/interrupt.c 		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
vcpu              364 arch/s390/kvm/interrupt.c static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
vcpu              368 arch/s390/kvm/interrupt.c 	active_mask = pending_irqs(vcpu);
vcpu              372 arch/s390/kvm/interrupt.c 	if (psw_extint_disabled(vcpu))
vcpu              374 arch/s390/kvm/interrupt.c 	if (psw_ioint_disabled(vcpu))
vcpu              377 arch/s390/kvm/interrupt.c 		active_mask = disable_iscs(vcpu, active_mask);
vcpu              378 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
vcpu              380 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
vcpu              382 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
vcpu              384 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
vcpu              386 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
vcpu              388 arch/s390/kvm/interrupt.c 	if (psw_mchk_disabled(vcpu))
vcpu              394 arch/s390/kvm/interrupt.c 	if (!(vcpu->arch.sie_block->gcr[14] &
vcpu              395 arch/s390/kvm/interrupt.c 	   (vcpu->kvm->arch.float_int.mchk.cr14 |
vcpu              396 arch/s390/kvm/interrupt.c 	   vcpu->arch.local_int.irq.mchk.cr14)))
vcpu              408 arch/s390/kvm/interrupt.c static void __set_cpu_idle(struct kvm_vcpu *vcpu)
vcpu              410 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
vcpu              411 arch/s390/kvm/interrupt.c 	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
vcpu              414 arch/s390/kvm/interrupt.c static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
vcpu              416 arch/s390/kvm/interrupt.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
vcpu              417 arch/s390/kvm/interrupt.c 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
vcpu              420 arch/s390/kvm/interrupt.c static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
vcpu              422 arch/s390/kvm/interrupt.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
vcpu              424 arch/s390/kvm/interrupt.c 	vcpu->arch.sie_block->lctl = 0x0000;
vcpu              425 arch/s390/kvm/interrupt.c 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
vcpu              427 arch/s390/kvm/interrupt.c 	if (guestdbg_enabled(vcpu)) {
vcpu              428 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
vcpu              430 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
vcpu              434 arch/s390/kvm/interrupt.c static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
vcpu              436 arch/s390/kvm/interrupt.c 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
vcpu              438 arch/s390/kvm/interrupt.c 	if (psw_ioint_disabled(vcpu))
vcpu              439 arch/s390/kvm/interrupt.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
vcpu              441 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->lctl |= LCTL_CR6;
vcpu              444 arch/s390/kvm/interrupt.c static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
vcpu              446 arch/s390/kvm/interrupt.c 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
vcpu              448 arch/s390/kvm/interrupt.c 	if (psw_extint_disabled(vcpu))
vcpu              449 arch/s390/kvm/interrupt.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu              451 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->lctl |= LCTL_CR0;
vcpu              454 arch/s390/kvm/interrupt.c static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
vcpu              456 arch/s390/kvm/interrupt.c 	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
vcpu              458 arch/s390/kvm/interrupt.c 	if (psw_mchk_disabled(vcpu))
vcpu              459 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
vcpu              461 arch/s390/kvm/interrupt.c 		vcpu->arch.sie_block->lctl |= LCTL_CR14;
vcpu              464 arch/s390/kvm/interrupt.c static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
vcpu              466 arch/s390/kvm/interrupt.c 	if (kvm_s390_is_stop_irq_pending(vcpu))
vcpu              467 arch/s390/kvm/interrupt.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
vcpu              471 arch/s390/kvm/interrupt.c static void set_intercept_indicators(struct kvm_vcpu *vcpu)
vcpu              473 arch/s390/kvm/interrupt.c 	set_intercept_indicators_io(vcpu);
vcpu              474 arch/s390/kvm/interrupt.c 	set_intercept_indicators_ext(vcpu);
vcpu              475 arch/s390/kvm/interrupt.c 	set_intercept_indicators_mchk(vcpu);
vcpu              476 arch/s390/kvm/interrupt.c 	set_intercept_indicators_stop(vcpu);
vcpu              479 arch/s390/kvm/interrupt.c static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
vcpu              481 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              484 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_cputm++;
vcpu              485 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
vcpu              488 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
vcpu              490 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
vcpu              491 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              492 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              493 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              494 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              499 arch/s390/kvm/interrupt.c static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
vcpu              501 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              504 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_ckc++;
vcpu              505 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
vcpu              508 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
vcpu              510 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
vcpu              511 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              512 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              513 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              514 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              519 arch/s390/kvm/interrupt.c static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
vcpu              521 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              531 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
vcpu              533 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu              537 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
vcpu              538 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
vcpu              539 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              540 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              541 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              542 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              543 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
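
The __deliver_cpu_timer(), __deliver_ckc() and __deliver_pfault_init() lines above all follow the same lowcore sequence for external interrupts: store the interruption code and CPU address with put_guest_lc(), save the current PSW into the old-PSW slot with write_guest_lc(), then load the new PSW from lowcore with read_guest_lc(). Condensed into one hedged fragment (EXT_IRQ_EXAMPLE is a placeholder interruption code, not a real constant):

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXAMPLE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
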
vcpu              547 arch/s390/kvm/interrupt.c static int __write_machine_check(struct kvm_vcpu *vcpu,
vcpu              559 arch/s390/kvm/interrupt.c 	save_access_regs(vcpu->run->s.regs.acrs);
vcpu              560 arch/s390/kvm/interrupt.c 	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
vcpu              564 arch/s390/kvm/interrupt.c 	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
vcpu              568 arch/s390/kvm/interrupt.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
vcpu              588 arch/s390/kvm/interrupt.c 	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
vcpu              589 arch/s390/kvm/interrupt.c 		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
vcpu              595 arch/s390/kvm/interrupt.c 	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
vcpu              597 arch/s390/kvm/interrupt.c 		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
vcpu              598 arch/s390/kvm/interrupt.c 				    &vcpu->run->s.regs.gscb, 32))
vcpu              605 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
vcpu              606 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
vcpu              607 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              608 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
vcpu              609 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              610 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
vcpu              614 arch/s390/kvm/interrupt.c 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
vcpu              615 arch/s390/kvm/interrupt.c 		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
vcpu              617 arch/s390/kvm/interrupt.c 		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
vcpu              618 arch/s390/kvm/interrupt.c 				     vcpu->run->s.regs.fprs, 128);
vcpu              620 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
vcpu              621 arch/s390/kvm/interrupt.c 			     vcpu->run->s.regs.gprs, 128);
vcpu              622 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
vcpu              624 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
vcpu              626 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
vcpu              628 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
vcpu              630 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
vcpu              631 arch/s390/kvm/interrupt.c 			     &vcpu->run->s.regs.acrs, 64);
vcpu              632 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
vcpu              633 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gcr, 128);
vcpu              636 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
vcpu              638 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
vcpu              640 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
vcpu              645 arch/s390/kvm/interrupt.c static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
vcpu              647 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
vcpu              648 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              685 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
vcpu              687 arch/s390/kvm/interrupt.c 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu              690 arch/s390/kvm/interrupt.c 		vcpu->stat.deliver_machine_check++;
vcpu              691 arch/s390/kvm/interrupt.c 		rc = __write_machine_check(vcpu, &mchk);
vcpu              696 arch/s390/kvm/interrupt.c static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
vcpu              698 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              701 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
vcpu              702 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_restart_signal++;
vcpu              703 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
vcpu              705 arch/s390/kvm/interrupt.c 	rc  = write_guest_lc(vcpu,
vcpu              707 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              708 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
vcpu              709 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              714 arch/s390/kvm/interrupt.c static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
vcpu              716 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              725 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_prefix_signal++;
vcpu              726 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu              730 arch/s390/kvm/interrupt.c 	kvm_s390_set_prefix(vcpu, prefix.address);
vcpu              734 arch/s390/kvm/interrupt.c static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
vcpu              736 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              747 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
vcpu              748 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_emergency_signal++;
vcpu              749 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
vcpu              752 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
vcpu              754 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
vcpu              755 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              756 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              757 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              758 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              762 arch/s390/kvm/interrupt.c static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
vcpu              764 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              774 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
vcpu              775 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_external_call++;
vcpu              776 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu              780 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
vcpu              782 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
vcpu              783 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              784 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              785 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
vcpu              790 arch/s390/kvm/interrupt.c static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
vcpu              792 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu              804 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
vcpu              806 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_program++;
vcpu              807 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
vcpu              823 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
vcpu              832 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
vcpu              842 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
vcpu              844 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
vcpu              846 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
vcpu              851 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
vcpu              853 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
vcpu              858 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
vcpu              862 arch/s390/kvm/interrupt.c 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
vcpu              864 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
vcpu              879 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.per_code,
vcpu              881 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
vcpu              883 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.per_address,
vcpu              885 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
vcpu              890 arch/s390/kvm/interrupt.c 		kvm_s390_rewind_psw(vcpu, ilen);
vcpu              893 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
vcpu              894 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
vcpu              896 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, pgm_info.code,
vcpu              898 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
vcpu              899 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              900 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
vcpu              901 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              905 arch/s390/kvm/interrupt.c static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
vcpu              907 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
vcpu              921 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
vcpu              923 arch/s390/kvm/interrupt.c 	vcpu->stat.deliver_service_signal++;
vcpu              924 arch/s390/kvm/interrupt.c 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
vcpu              927 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
vcpu              928 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
vcpu              929 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              930 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              931 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              932 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
vcpu              933 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, ext.ext_params,
vcpu              939 arch/s390/kvm/interrupt.c static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
vcpu              941 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
vcpu              958 arch/s390/kvm/interrupt.c 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu              961 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
vcpu              964 arch/s390/kvm/interrupt.c 		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
vcpu              966 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, PFAULT_DONE,
vcpu              968 arch/s390/kvm/interrupt.c 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu              969 arch/s390/kvm/interrupt.c 				&vcpu->arch.sie_block->gpsw,
vcpu              971 arch/s390/kvm/interrupt.c 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu              972 arch/s390/kvm/interrupt.c 				&vcpu->arch.sie_block->gpsw,
vcpu              974 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
vcpu              981 arch/s390/kvm/interrupt.c static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
vcpu              983 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
vcpu              992 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 4,
vcpu              995 arch/s390/kvm/interrupt.c 		vcpu->stat.deliver_virtio++;
vcpu              996 arch/s390/kvm/interrupt.c 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu             1008 arch/s390/kvm/interrupt.c 		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
vcpu             1010 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
vcpu             1012 arch/s390/kvm/interrupt.c 		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
vcpu             1013 arch/s390/kvm/interrupt.c 				&vcpu->arch.sie_block->gpsw,
vcpu             1015 arch/s390/kvm/interrupt.c 		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
vcpu             1016 arch/s390/kvm/interrupt.c 				&vcpu->arch.sie_block->gpsw,
vcpu             1018 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
vcpu             1020 arch/s390/kvm/interrupt.c 		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
vcpu             1027 arch/s390/kvm/interrupt.c static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
vcpu             1031 arch/s390/kvm/interrupt.c 	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
vcpu             1032 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
vcpu             1033 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
vcpu             1034 arch/s390/kvm/interrupt.c 	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
vcpu             1035 arch/s390/kvm/interrupt.c 	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
vcpu             1036 arch/s390/kvm/interrupt.c 			     &vcpu->arch.sie_block->gpsw,
vcpu             1038 arch/s390/kvm/interrupt.c 	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
vcpu             1039 arch/s390/kvm/interrupt.c 			    &vcpu->arch.sie_block->gpsw,
vcpu             1044 arch/s390/kvm/interrupt.c static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
vcpu             1049 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
vcpu             1055 arch/s390/kvm/interrupt.c 	fi = &vcpu->kvm->arch.float_int;
vcpu             1065 arch/s390/kvm/interrupt.c 			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
vcpu             1067 arch/s390/kvm/interrupt.c 			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
vcpu             1072 arch/s390/kvm/interrupt.c 		vcpu->stat.deliver_io++;
vcpu             1073 arch/s390/kvm/interrupt.c 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu             1087 arch/s390/kvm/interrupt.c 		rc = __do_deliver_io(vcpu, &(inti->io));
vcpu             1097 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
vcpu             1100 arch/s390/kvm/interrupt.c 		vcpu->stat.deliver_io++;
vcpu             1101 arch/s390/kvm/interrupt.c 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
vcpu             1107 arch/s390/kvm/interrupt.c 		rc = __do_deliver_io(vcpu, &io);
vcpu             1114 arch/s390/kvm/interrupt.c int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
vcpu             1116 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1121 arch/s390/kvm/interrupt.c 	return sca_ext_call_pending(vcpu, NULL);
vcpu             1124 arch/s390/kvm/interrupt.c int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
vcpu             1126 arch/s390/kvm/interrupt.c 	if (deliverable_irqs(vcpu))
vcpu             1129 arch/s390/kvm/interrupt.c 	if (kvm_cpu_has_pending_timer(vcpu))
vcpu             1133 arch/s390/kvm/interrupt.c 	if (kvm_s390_ext_call_pending(vcpu) &&
vcpu             1134 arch/s390/kvm/interrupt.c 	    !psw_extint_disabled(vcpu) &&
vcpu             1135 arch/s390/kvm/interrupt.c 	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
vcpu             1138 arch/s390/kvm/interrupt.c 	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
vcpu             1143 arch/s390/kvm/interrupt.c int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu             1145 arch/s390/kvm/interrupt.c 	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
vcpu             1148 arch/s390/kvm/interrupt.c static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
vcpu             1150 arch/s390/kvm/interrupt.c 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
vcpu             1151 arch/s390/kvm/interrupt.c 	const u64 ckc = vcpu->arch.sie_block->ckc;
vcpu             1154 arch/s390/kvm/interrupt.c 	if (ckc_interrupts_enabled(vcpu)) {
vcpu             1155 arch/s390/kvm/interrupt.c 		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
vcpu             1164 arch/s390/kvm/interrupt.c 		if (cpu_timer_interrupts_enabled(vcpu)) {
vcpu             1165 arch/s390/kvm/interrupt.c 			cputm = kvm_s390_get_cpu_timer(vcpu);
vcpu             1171 arch/s390/kvm/interrupt.c 	} else if (cpu_timer_interrupts_enabled(vcpu)) {
vcpu             1172 arch/s390/kvm/interrupt.c 		sltime = kvm_s390_get_cpu_timer(vcpu);
vcpu             1180 arch/s390/kvm/interrupt.c int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
vcpu             1182 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
vcpu             1185 arch/s390/kvm/interrupt.c 	vcpu->stat.exit_wait_state++;
vcpu             1188 arch/s390/kvm/interrupt.c 	if (kvm_arch_vcpu_runnable(vcpu))
vcpu             1191 arch/s390/kvm/interrupt.c 	if (psw_interrupts_disabled(vcpu)) {
vcpu             1192 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
vcpu             1198 arch/s390/kvm/interrupt.c 	     vcpu->arch.sie_block->gcr[6] >> 24))
vcpu             1201 arch/s390/kvm/interrupt.c 	if (!ckc_interrupts_enabled(vcpu) &&
vcpu             1202 arch/s390/kvm/interrupt.c 	    !cpu_timer_interrupts_enabled(vcpu)) {
vcpu             1203 arch/s390/kvm/interrupt.c 		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
vcpu             1204 arch/s390/kvm/interrupt.c 		__set_cpu_idle(vcpu);
vcpu             1208 arch/s390/kvm/interrupt.c 	sltime = __calculate_sltime(vcpu);
vcpu             1212 arch/s390/kvm/interrupt.c 	__set_cpu_idle(vcpu);
vcpu             1213 arch/s390/kvm/interrupt.c 	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
vcpu             1214 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
vcpu             1216 arch/s390/kvm/interrupt.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             1217 arch/s390/kvm/interrupt.c 	kvm_vcpu_block(vcpu);
vcpu             1218 arch/s390/kvm/interrupt.c 	__unset_cpu_idle(vcpu);
vcpu             1219 arch/s390/kvm/interrupt.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1221 arch/s390/kvm/interrupt.c 	hrtimer_cancel(&vcpu->arch.ckc_timer);
vcpu             1225 arch/s390/kvm/interrupt.c void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
vcpu             1227 arch/s390/kvm/interrupt.c 	vcpu->valid_wakeup = true;
vcpu             1228 arch/s390/kvm/interrupt.c 	kvm_vcpu_wake_up(vcpu);
vcpu             1234 arch/s390/kvm/interrupt.c 	kvm_s390_vsie_kick(vcpu);
vcpu             1239 arch/s390/kvm/interrupt.c 	struct kvm_vcpu *vcpu;
vcpu             1242 arch/s390/kvm/interrupt.c 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
vcpu             1243 arch/s390/kvm/interrupt.c 	sltime = __calculate_sltime(vcpu);
vcpu             1251 arch/s390/kvm/interrupt.c 	kvm_s390_vcpu_wakeup(vcpu);
vcpu             1255 arch/s390/kvm/interrupt.c void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
vcpu             1257 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1265 arch/s390/kvm/interrupt.c 	sca_clear_ext_call(vcpu);
vcpu             1268 arch/s390/kvm/interrupt.c int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
vcpu             1270 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1275 arch/s390/kvm/interrupt.c 	__reset_intercept_indicators(vcpu);
vcpu             1279 arch/s390/kvm/interrupt.c 	if (ckc_irq_pending(vcpu))
vcpu             1284 arch/s390/kvm/interrupt.c 	if (cpu_timer_irq_pending(vcpu))
vcpu             1287 arch/s390/kvm/interrupt.c 	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
vcpu             1299 arch/s390/kvm/interrupt.c 			rc = __deliver_io(vcpu, irq_type);
vcpu             1303 arch/s390/kvm/interrupt.c 			rc = __deliver_machine_check(vcpu);
vcpu             1306 arch/s390/kvm/interrupt.c 			rc = __deliver_prog(vcpu);
vcpu             1309 arch/s390/kvm/interrupt.c 			rc = __deliver_emergency_signal(vcpu);
vcpu             1312 arch/s390/kvm/interrupt.c 			rc = __deliver_external_call(vcpu);
vcpu             1315 arch/s390/kvm/interrupt.c 			rc = __deliver_ckc(vcpu);
vcpu             1318 arch/s390/kvm/interrupt.c 			rc = __deliver_cpu_timer(vcpu);
vcpu             1321 arch/s390/kvm/interrupt.c 			rc = __deliver_restart(vcpu);
vcpu             1324 arch/s390/kvm/interrupt.c 			rc = __deliver_set_prefix(vcpu);
vcpu             1327 arch/s390/kvm/interrupt.c 			rc = __deliver_pfault_init(vcpu);
vcpu             1330 arch/s390/kvm/interrupt.c 			rc = __deliver_service(vcpu);
vcpu             1333 arch/s390/kvm/interrupt.c 			rc = __deliver_pfault_done(vcpu);
vcpu             1336 arch/s390/kvm/interrupt.c 			rc = __deliver_virtio(vcpu);
vcpu             1344 arch/s390/kvm/interrupt.c 	set_intercept_indicators(vcpu);
vcpu             1349 arch/s390/kvm/interrupt.c static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1351 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1353 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_program++;
vcpu             1354 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
vcpu             1355 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
vcpu             1361 arch/s390/kvm/interrupt.c 		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
vcpu             1391 arch/s390/kvm/interrupt.c static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1393 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1395 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_pfault_init++;
vcpu             1396 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
vcpu             1398 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
vcpu             1404 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu             1408 arch/s390/kvm/interrupt.c static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1410 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1414 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_external_call++;
vcpu             1415 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
vcpu             1417 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
vcpu             1421 arch/s390/kvm/interrupt.c 	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
vcpu             1425 arch/s390/kvm/interrupt.c 		return sca_inject_ext_call(vcpu, src_id);
vcpu             1430 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu             1434 arch/s390/kvm/interrupt.c static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1436 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1439 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_set_prefix++;
vcpu             1440 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
vcpu             1442 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
vcpu             1445 arch/s390/kvm/interrupt.c 	if (!is_vcpu_stopped(vcpu))
vcpu             1454 arch/s390/kvm/interrupt.c static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1456 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1460 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_stop_signal++;
vcpu             1461 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
vcpu             1466 arch/s390/kvm/interrupt.c 	if (is_vcpu_stopped(vcpu)) {
vcpu             1468 arch/s390/kvm/interrupt.c 			rc = kvm_s390_store_status_unloaded(vcpu,
vcpu             1476 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
vcpu             1480 arch/s390/kvm/interrupt.c static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
vcpu             1483 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1485 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_restart++;
vcpu             1486 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
vcpu             1487 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
vcpu             1493 arch/s390/kvm/interrupt.c static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
vcpu             1496 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1498 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_emergency_signal++;
vcpu             1499 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
vcpu             1501 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
vcpu             1505 arch/s390/kvm/interrupt.c 	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
vcpu             1510 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu             1514 arch/s390/kvm/interrupt.c static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             1516 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1519 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_mchk++;
vcpu             1520 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
vcpu             1522 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
vcpu             1546 arch/s390/kvm/interrupt.c static int __inject_ckc(struct kvm_vcpu *vcpu)
vcpu             1548 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1550 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_ckc++;
vcpu             1551 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
vcpu             1552 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
vcpu             1556 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu             1560 arch/s390/kvm/interrupt.c static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
vcpu             1562 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1564 arch/s390/kvm/interrupt.c 	vcpu->stat.inject_cputm++;
vcpu             1565 arch/s390/kvm/interrupt.c 	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
vcpu             1566 arch/s390/kvm/interrupt.c 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
vcpu             1570 arch/s390/kvm/interrupt.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
vcpu             1978 arch/s390/kvm/interrupt.c int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
vcpu             1980 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1985 arch/s390/kvm/interrupt.c void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
vcpu             1987 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             1995 arch/s390/kvm/interrupt.c static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             2001 arch/s390/kvm/interrupt.c 		rc = __inject_prog(vcpu, irq);
vcpu             2004 arch/s390/kvm/interrupt.c 		rc = __inject_set_prefix(vcpu, irq);
vcpu             2007 arch/s390/kvm/interrupt.c 		rc = __inject_sigp_stop(vcpu, irq);
vcpu             2010 arch/s390/kvm/interrupt.c 		rc = __inject_sigp_restart(vcpu, irq);
vcpu             2013 arch/s390/kvm/interrupt.c 		rc = __inject_ckc(vcpu);
vcpu             2016 arch/s390/kvm/interrupt.c 		rc = __inject_cpu_timer(vcpu);
vcpu             2019 arch/s390/kvm/interrupt.c 		rc = __inject_extcall(vcpu, irq);
vcpu             2022 arch/s390/kvm/interrupt.c 		rc = __inject_sigp_emergency(vcpu, irq);
vcpu             2025 arch/s390/kvm/interrupt.c 		rc = __inject_mchk(vcpu, irq);
vcpu             2028 arch/s390/kvm/interrupt.c 		rc = __inject_pfault_init(vcpu, irq);
vcpu             2040 arch/s390/kvm/interrupt.c int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
vcpu             2042 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             2046 arch/s390/kvm/interrupt.c 	rc = do_inject_vcpu(vcpu, irq);
vcpu             2049 arch/s390/kvm/interrupt.c 		kvm_s390_vcpu_wakeup(vcpu);
vcpu             2600 arch/s390/kvm/interrupt.c 	struct kvm_vcpu *vcpu;
vcpu             2620 arch/s390/kvm/interrupt.c 		kvm_for_each_vcpu(i, vcpu, dev->kvm)
vcpu             2621 arch/s390/kvm/interrupt.c 			kvm_clear_async_pf_completion_queue(vcpu);
vcpu             2783 arch/s390/kvm/interrupt.c void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
vcpu             2809 arch/s390/kvm/interrupt.c 		rc = __inject_vm(vcpu->kvm, &inti);
vcpu             2813 arch/s390/kvm/interrupt.c 		rc = kvm_s390_inject_vcpu(vcpu, &irq);
vcpu             2847 arch/s390/kvm/interrupt.c int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
vcpu             2849 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             2874 arch/s390/kvm/interrupt.c 		r = do_inject_vcpu(vcpu, &buf[n]);
vcpu             2929 arch/s390/kvm/interrupt.c int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
vcpu             2933 arch/s390/kvm/interrupt.c 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
vcpu             2952 arch/s390/kvm/interrupt.c 		store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
vcpu             2971 arch/s390/kvm/interrupt.c 	if (sca_ext_call_pending(vcpu, &scn)) {
vcpu             2989 arch/s390/kvm/interrupt.c 	struct kvm_vcpu *vcpu;
vcpu             2992 arch/s390/kvm/interrupt.c 		vcpu = kvm_get_vcpu(kvm, vcpu_id);
vcpu             2993 arch/s390/kvm/interrupt.c 		if (psw_ioint_disabled(vcpu))
vcpu             2995 arch/s390/kvm/interrupt.c 		deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
vcpu             3000 arch/s390/kvm/interrupt.c 			kvm_s390_vcpu_wakeup(vcpu);
vcpu              270 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu              275 arch/s390/kvm/kvm-s390.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              276 arch/s390/kvm/kvm-s390.c 			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
vcpu              278 arch/s390/kvm/kvm-s390.c 				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
vcpu              279 arch/s390/kvm/kvm-s390.c 				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
vcpu              281 arch/s390/kvm/kvm-s390.c 			if (vcpu->arch.cputm_enabled)
vcpu              282 arch/s390/kvm/kvm-s390.c 				vcpu->arch.cputm_start += *delta;
vcpu              283 arch/s390/kvm/kvm-s390.c 			if (vcpu->arch.vsie_block)
vcpu              284 arch/s390/kvm/kvm-s390.c 				kvm_clock_sync_scb(vcpu->arch.vsie_block,
vcpu              607 arch/s390/kvm/kvm-s390.c static void sca_del_vcpu(struct kvm_vcpu *vcpu);
vcpu              655 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu              657 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              658 arch/s390/kvm/kvm-s390.c 		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
vcpu              899 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
vcpu              903 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu              908 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              909 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_crypto_setup(vcpu);
vcpu              911 arch/s390/kvm/kvm-s390.c 		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
vcpu              990 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu              992 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(cx, vcpu, kvm)
vcpu              993 arch/s390/kvm/kvm-s390.c 		kvm_s390_sync_request(req, vcpu);
vcpu             2524 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu             2526 arch/s390/kvm/kvm-s390.c 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
vcpu             2527 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
vcpu             2528 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_local_irqs(vcpu);
vcpu             2529 arch/s390/kvm/kvm-s390.c 	kvm_clear_async_pf_completion_queue(vcpu);
vcpu             2530 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm))
vcpu             2531 arch/s390/kvm/kvm-s390.c 		sca_del_vcpu(vcpu);
vcpu             2533 arch/s390/kvm/kvm-s390.c 	if (kvm_is_ucontrol(vcpu->kvm))
vcpu             2534 arch/s390/kvm/kvm-s390.c 		gmap_remove(vcpu->arch.gmap);
vcpu             2536 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_cmma)
vcpu             2537 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_unsetup_cmma(vcpu);
vcpu             2538 arch/s390/kvm/kvm-s390.c 	free_page((unsigned long)(vcpu->arch.sie_block));
vcpu             2540 arch/s390/kvm/kvm-s390.c 	kvm_vcpu_uninit(vcpu);
vcpu             2541 arch/s390/kvm/kvm-s390.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             2547 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             2549 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             2550 arch/s390/kvm/kvm-s390.c 		kvm_arch_vcpu_destroy(vcpu);
vcpu             2576 arch/s390/kvm/kvm-s390.c static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             2578 arch/s390/kvm/kvm-s390.c 	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
vcpu             2579 arch/s390/kvm/kvm-s390.c 	if (!vcpu->arch.gmap)
vcpu             2581 arch/s390/kvm/kvm-s390.c 	vcpu->arch.gmap->private = vcpu->kvm;
vcpu             2586 arch/s390/kvm/kvm-s390.c static void sca_del_vcpu(struct kvm_vcpu *vcpu)
vcpu             2590 arch/s390/kvm/kvm-s390.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu             2591 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_esca) {
vcpu             2592 arch/s390/kvm/kvm-s390.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
vcpu             2594 arch/s390/kvm/kvm-s390.c 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
vcpu             2595 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = 0;
vcpu             2597 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu             2599 arch/s390/kvm/kvm-s390.c 		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
vcpu             2600 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = 0;
vcpu             2602 arch/s390/kvm/kvm-s390.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu             2605 arch/s390/kvm/kvm-s390.c static void sca_add_vcpu(struct kvm_vcpu *vcpu)
vcpu             2608 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu             2611 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu             2612 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
vcpu             2615 arch/s390/kvm/kvm-s390.c 	read_lock(&vcpu->kvm->arch.sca_lock);
vcpu             2616 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_esca) {
vcpu             2617 arch/s390/kvm/kvm-s390.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
vcpu             2619 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu             2620 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu             2621 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
vcpu             2622 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
vcpu             2623 arch/s390/kvm/kvm-s390.c 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
vcpu             2625 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
vcpu             2627 arch/s390/kvm/kvm-s390.c 		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
vcpu             2628 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu             2629 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
vcpu             2630 arch/s390/kvm/kvm-s390.c 		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
vcpu             2632 arch/s390/kvm/kvm-s390.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
vcpu             2657 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             2673 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
vcpu             2674 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaoh = scaoh;
vcpu             2675 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->scaol = scaol;
vcpu             2676 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
vcpu             2712 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             2714 arch/s390/kvm/kvm-s390.c 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
vcpu             2715 arch/s390/kvm/kvm-s390.c 	kvm_clear_async_pf_completion_queue(vcpu);
vcpu             2716 arch/s390/kvm/kvm-s390.c 	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
vcpu             2722 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_prefix(vcpu, 0);
vcpu             2723 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 64))
vcpu             2724 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
vcpu             2725 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 82))
vcpu             2726 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
vcpu             2727 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 133))
vcpu             2728 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
vcpu             2729 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 156))
vcpu             2730 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
vcpu             2735 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
vcpu             2737 arch/s390/kvm/kvm-s390.c 		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
vcpu             2739 arch/s390/kvm/kvm-s390.c 	if (kvm_is_ucontrol(vcpu->kvm))
vcpu             2740 arch/s390/kvm/kvm-s390.c 		return __kvm_ucontrol_vcpu_init(vcpu);
vcpu             2746 arch/s390/kvm/kvm-s390.c static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2748 arch/s390/kvm/kvm-s390.c 	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
vcpu             2749 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
vcpu             2750 arch/s390/kvm/kvm-s390.c 	vcpu->arch.cputm_start = get_tod_clock_fast();
vcpu             2751 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
vcpu             2755 arch/s390/kvm/kvm-s390.c static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2757 arch/s390/kvm/kvm-s390.c 	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
vcpu             2758 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
vcpu             2759 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
vcpu             2760 arch/s390/kvm/kvm-s390.c 	vcpu->arch.cputm_start = 0;
vcpu             2761 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
vcpu             2765 arch/s390/kvm/kvm-s390.c static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2767 arch/s390/kvm/kvm-s390.c 	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
vcpu             2768 arch/s390/kvm/kvm-s390.c 	vcpu->arch.cputm_enabled = true;
vcpu             2769 arch/s390/kvm/kvm-s390.c 	__start_cpu_timer_accounting(vcpu);
vcpu             2773 arch/s390/kvm/kvm-s390.c static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2775 arch/s390/kvm/kvm-s390.c 	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
vcpu             2776 arch/s390/kvm/kvm-s390.c 	__stop_cpu_timer_accounting(vcpu);
vcpu             2777 arch/s390/kvm/kvm-s390.c 	vcpu->arch.cputm_enabled = false;
vcpu             2780 arch/s390/kvm/kvm-s390.c static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2783 arch/s390/kvm/kvm-s390.c 	__enable_cpu_timer_accounting(vcpu);
vcpu             2787 arch/s390/kvm/kvm-s390.c static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
vcpu             2790 arch/s390/kvm/kvm-s390.c 	__disable_cpu_timer_accounting(vcpu);
vcpu             2795 arch/s390/kvm/kvm-s390.c void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
vcpu             2798 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
vcpu             2799 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.cputm_enabled)
vcpu             2800 arch/s390/kvm/kvm-s390.c 		vcpu->arch.cputm_start = get_tod_clock_fast();
vcpu             2801 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->cputm = cputm;
vcpu             2802 arch/s390/kvm/kvm-s390.c 	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
vcpu             2807 arch/s390/kvm/kvm-s390.c __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
vcpu             2812 arch/s390/kvm/kvm-s390.c 	if (unlikely(!vcpu->arch.cputm_enabled))
vcpu             2813 arch/s390/kvm/kvm-s390.c 		return vcpu->arch.sie_block->cputm;
vcpu             2817 arch/s390/kvm/kvm-s390.c 		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
vcpu             2822 arch/s390/kvm/kvm-s390.c 		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
vcpu             2823 arch/s390/kvm/kvm-s390.c 		value = vcpu->arch.sie_block->cputm;
vcpu             2825 arch/s390/kvm/kvm-s390.c 		if (likely(vcpu->arch.cputm_start))
vcpu             2826 arch/s390/kvm/kvm-s390.c 			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
vcpu             2827 arch/s390/kvm/kvm-s390.c 	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
vcpu             2832 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2835 arch/s390/kvm/kvm-s390.c 	gmap_enable(vcpu->arch.enabled_gmap);
vcpu             2836 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
vcpu             2837 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
vcpu             2838 arch/s390/kvm/kvm-s390.c 		__start_cpu_timer_accounting(vcpu);
vcpu             2839 arch/s390/kvm/kvm-s390.c 	vcpu->cpu = cpu;
vcpu             2842 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             2844 arch/s390/kvm/kvm-s390.c 	vcpu->cpu = -1;
vcpu             2845 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
vcpu             2846 arch/s390/kvm/kvm-s390.c 		__stop_cpu_timer_accounting(vcpu);
vcpu             2847 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
vcpu             2848 arch/s390/kvm/kvm-s390.c 	vcpu->arch.enabled_gmap = gmap_get_enabled();
vcpu             2849 arch/s390/kvm/kvm-s390.c 	gmap_disable(vcpu->arch.enabled_gmap);
vcpu             2853 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu             2856 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gpsw.mask = 0UL;
vcpu             2857 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gpsw.addr = 0UL;
vcpu             2858 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_prefix(vcpu, 0);
vcpu             2859 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_cpu_timer(vcpu, 0);
vcpu             2860 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->ckc       = 0UL;
vcpu             2861 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->todpr     = 0;
vcpu             2862 arch/s390/kvm/kvm-s390.c 	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
vcpu             2863 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gcr[0]  = CR0_UNUSED_56 |
vcpu             2866 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
vcpu             2869 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.fpc = 0;
vcpu             2870 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gbea = 1;
vcpu             2871 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->pp = 0;
vcpu             2872 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu             2873 arch/s390/kvm/kvm-s390.c 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
vcpu             2874 arch/s390/kvm/kvm-s390.c 	kvm_clear_async_pf_completion_queue(vcpu);
vcpu             2875 arch/s390/kvm/kvm-s390.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
vcpu             2876 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_stop(vcpu);
vcpu             2877 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_local_irqs(vcpu);
vcpu             2880 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu             2882 arch/s390/kvm/kvm-s390.c 	mutex_lock(&vcpu->kvm->lock);
vcpu             2884 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
vcpu             2885 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
vcpu             2887 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&vcpu->kvm->lock);
vcpu             2888 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu             2889 arch/s390/kvm/kvm-s390.c 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
vcpu             2890 arch/s390/kvm/kvm-s390.c 		sca_add_vcpu(vcpu);
vcpu             2892 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
vcpu             2893 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
vcpu             2895 arch/s390/kvm/kvm-s390.c 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
vcpu             2917 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
vcpu             2923 arch/s390/kvm/kvm-s390.c 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
vcpu             2926 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu             2927 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu             2928 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->eca &= ~ECA_APIE;
vcpu             2929 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
vcpu             2931 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.apie)
vcpu             2932 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_APIE;
vcpu             2935 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.aes_kw) {
vcpu             2936 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
vcpu             2938 arch/s390/kvm/kvm-s390.c 		if (kvm_has_pckmo_ecc(vcpu->kvm))
vcpu             2939 arch/s390/kvm/kvm-s390.c 			vcpu->arch.sie_block->ecd |= ECD_ECC;
vcpu             2942 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.dea_kw)
vcpu             2943 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
vcpu             2946 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
vcpu             2948 arch/s390/kvm/kvm-s390.c 	free_page(vcpu->arch.sie_block->cbrlo);
vcpu             2949 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->cbrlo = 0;
vcpu             2952 arch/s390/kvm/kvm-s390.c int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
vcpu             2954 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
vcpu             2955 arch/s390/kvm/kvm-s390.c 	if (!vcpu->arch.sie_block->cbrlo)
vcpu             2960 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
vcpu             2962 arch/s390/kvm/kvm-s390.c 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
vcpu             2964 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->ibc = model->ibc;
vcpu             2965 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 7))
vcpu             2966 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
vcpu             2969 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu             2973 arch/s390/kvm/kvm-s390.c 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
vcpu             2977 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 78))
vcpu             2978 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
vcpu             2979 arch/s390/kvm/kvm-s390.c 	else if (test_kvm_facility(vcpu->kvm, 8))
vcpu             2980 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
vcpu             2982 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_setup_model(vcpu);
vcpu             2986 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
vcpu             2987 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 9))
vcpu             2988 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb |= ECB_SRSI;
vcpu             2989 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 73))
vcpu             2990 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb |= ECB_TE;
vcpu             2992 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
vcpu             2993 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
vcpu             2994 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 130))
vcpu             2995 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
vcpu             2996 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
vcpu             2998 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_CEI;
vcpu             3000 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_IB;
vcpu             3002 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_SII;
vcpu             3004 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_SIGPI;
vcpu             3005 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu             3006 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_VX;
vcpu             3007 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu             3009 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 139))
vcpu             3010 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecd |= ECD_MEF;
vcpu             3011 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 156))
vcpu             3012 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
vcpu             3013 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.sie_block->gd) {
vcpu             3014 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->eca |= ECA_AIV;
vcpu             3015 arch/s390/kvm/kvm-s390.c 		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
vcpu             3016 arch/s390/kvm/kvm-s390.c 			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
vcpu             3018 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
vcpu             3020 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
vcpu             3023 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
vcpu             3025 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
vcpu             3027 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_cmma) {
vcpu             3028 arch/s390/kvm/kvm-s390.c 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
vcpu             3032 arch/s390/kvm/kvm-s390.c 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu             3033 arch/s390/kvm/kvm-s390.c 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
vcpu             3035 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->hpid = HPID_KVM;
vcpu             3037 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_crypto_setup(vcpu);
vcpu             3045 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             3054 arch/s390/kvm/kvm-s390.c 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu             3055 arch/s390/kvm/kvm-s390.c 	if (!vcpu)
vcpu             3063 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block = &sie_page->sie_block;
vcpu             3064 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
vcpu             3067 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->mso = 0;
vcpu             3068 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->msl = sclp.hamax;
vcpu             3070 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->icpua = id;
vcpu             3071 arch/s390/kvm/kvm-s390.c 	spin_lock_init(&vcpu->arch.local_int.lock);
vcpu             3072 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
vcpu             3073 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
vcpu             3074 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
vcpu             3075 arch/s390/kvm/kvm-s390.c 	seqcount_init(&vcpu->arch.cputm_seqcount);
vcpu             3077 arch/s390/kvm/kvm-s390.c 	rc = kvm_vcpu_init(vcpu, kvm, id);
vcpu             3080 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
vcpu             3081 arch/s390/kvm/kvm-s390.c 		 vcpu->arch.sie_block);
vcpu             3082 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
vcpu             3084 arch/s390/kvm/kvm-s390.c 	return vcpu;
vcpu             3086 arch/s390/kvm/kvm-s390.c 	free_page((unsigned long)(vcpu->arch.sie_block));
vcpu             3088 arch/s390/kvm/kvm-s390.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu             3093 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
vcpu             3095 arch/s390/kvm/kvm-s390.c 	return kvm_s390_vcpu_has_irq(vcpu, 0);
vcpu             3098 arch/s390/kvm/kvm-s390.c bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
vcpu             3100 arch/s390/kvm/kvm-s390.c 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
vcpu             3103 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
vcpu             3105 arch/s390/kvm/kvm-s390.c 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
vcpu             3106 arch/s390/kvm/kvm-s390.c 	exit_sie(vcpu);
vcpu             3109 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
vcpu             3111 arch/s390/kvm/kvm-s390.c 	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
vcpu             3114 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
vcpu             3116 arch/s390/kvm/kvm-s390.c 	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
vcpu             3117 arch/s390/kvm/kvm-s390.c 	exit_sie(vcpu);
vcpu             3120 arch/s390/kvm/kvm-s390.c bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
vcpu             3122 arch/s390/kvm/kvm-s390.c 	return atomic_read(&vcpu->arch.sie_block->prog20) &
vcpu             3126 arch/s390/kvm/kvm-s390.c static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
vcpu             3128 arch/s390/kvm/kvm-s390.c 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
vcpu             3135 arch/s390/kvm/kvm-s390.c void exit_sie(struct kvm_vcpu *vcpu)
vcpu             3137 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
vcpu             3138 arch/s390/kvm/kvm-s390.c 	kvm_s390_vsie_kick(vcpu);
vcpu             3139 arch/s390/kvm/kvm-s390.c 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
vcpu             3144 arch/s390/kvm/kvm-s390.c void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
vcpu             3146 arch/s390/kvm/kvm-s390.c 	kvm_make_request(req, vcpu);
vcpu             3147 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_request(vcpu);
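The block/unblock, request, and exit_sie entries above all follow one discipline: atomically OR a flag into prog20, force the vCPU out of SIE, and wait until prog0c shows it has actually left. A minimal userspace sketch of that flag discipline follows; it is illustration only, and the toy_* names and bit values are made up (the real PROG_* constants are kernel-internal).

/* Toy model of the prog20 flag discipline (not kernel code). */
#include <stdatomic.h>
#include <stdio.h>

#define PROG_BLOCK_SIE 0x1u	/* hypothetical value, for illustration */
#define PROG_REQUEST   0x2u	/* hypothetical value, for illustration */
#define PROG_IN_SIE    0x1u	/* stands in for the PROG_IN_SIE bit in prog0c */

struct toy_vcpu {
	atomic_uint prog20;	/* blocking/request flags, like sie_block->prog20 */
	atomic_uint prog0c;	/* set while the vCPU runs in SIE, like prog0c */
};

/* Shape of kvm_s390_vcpu_block()/kvm_s390_vcpu_request(): raise the flag,
 * then wait until the vCPU is no longer inside SIE. */
static void toy_block(struct toy_vcpu *v)
{
	atomic_fetch_or(&v->prog20, PROG_BLOCK_SIE);
	while (atomic_load(&v->prog0c) & PROG_IN_SIE)
		;	/* the real exit_sie() also raises CPUSTAT_STOP_INT and kicks vSIE */
}

static void toy_unblock(struct toy_vcpu *v)
{
	atomic_fetch_and(&v->prog20, ~PROG_BLOCK_SIE);
}

int main(void)
{
	struct toy_vcpu v = { 0, 0 };

	toy_block(&v);
	printf("blocked,   prog20=%#x\n", atomic_load(&v.prog20));
	toy_unblock(&v);
	printf("unblocked, prog20=%#x\n", atomic_load(&v.prog20));
	return 0;
}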
vcpu             3154 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             3163 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             3165 arch/s390/kvm/kvm-s390.c 		prefix = kvm_s390_get_prefix(vcpu);
vcpu             3167 arch/s390/kvm/kvm-s390.c 			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
vcpu             3169 arch/s390/kvm/kvm-s390.c 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
vcpu             3174 arch/s390/kvm/kvm-s390.c bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
vcpu             3179 arch/s390/kvm/kvm-s390.c 		vcpu->stat.halt_no_poll_steal++;
vcpu             3185 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
vcpu             3192 arch/s390/kvm/kvm-s390.c static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
vcpu             3199 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.sie_block->todpr,
vcpu             3203 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.sie_block->epoch,
vcpu             3207 arch/s390/kvm/kvm-s390.c 		r = put_user(kvm_s390_get_cpu_timer(vcpu),
vcpu             3211 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.sie_block->ckc,
vcpu             3215 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.pfault_token,
vcpu             3219 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.pfault_compare,
vcpu             3223 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.pfault_select,
vcpu             3227 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.sie_block->pp,
vcpu             3231 arch/s390/kvm/kvm-s390.c 		r = put_user(vcpu->arch.sie_block->gbea,
vcpu             3241 arch/s390/kvm/kvm-s390.c static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
vcpu             3249 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.sie_block->todpr,
vcpu             3253 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.sie_block->epoch,
vcpu             3259 arch/s390/kvm/kvm-s390.c 			kvm_s390_set_cpu_timer(vcpu, val);
vcpu             3262 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.sie_block->ckc,
vcpu             3266 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.pfault_token,
vcpu             3268 arch/s390/kvm/kvm-s390.c 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu             3269 arch/s390/kvm/kvm-s390.c 			kvm_clear_async_pf_completion_queue(vcpu);
vcpu             3272 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.pfault_compare,
vcpu             3276 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.pfault_select,
vcpu             3280 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.sie_block->pp,
vcpu             3284 arch/s390/kvm/kvm-s390.c 		r = get_user(vcpu->arch.sie_block->gbea,
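The get_one_reg/set_one_reg handlers above are reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A hedged userspace sketch of reading the guest CPU timer is shown below; it assumes an already-created vCPU file descriptor (vcpu_fd) and the s390 register ID constant from the uapi headers, and is not a complete program.

/* Sketch: read the guest CPU timer via KVM_GET_ONE_REG. */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int read_cpu_timer(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,	/* matches the CPU_TIMER case above */
		.addr = (uint64_t)(unsigned long)val,	/* kernel writes the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}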
vcpu             3294 arch/s390/kvm/kvm-s390.c static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
vcpu             3296 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_initial_reset(vcpu);
vcpu             3300 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             3302 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3303 arch/s390/kvm/kvm-s390.c 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
vcpu             3304 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3308 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             3310 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3311 arch/s390/kvm/kvm-s390.c 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
vcpu             3312 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3316 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu             3319 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3321 arch/s390/kvm/kvm-s390.c 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
vcpu             3322 arch/s390/kvm/kvm-s390.c 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
vcpu             3324 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3328 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu             3331 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3333 arch/s390/kvm/kvm-s390.c 	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
vcpu             3334 arch/s390/kvm/kvm-s390.c 	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
vcpu             3336 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3340 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             3344 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3350 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.fpc = fpu->fpc;
vcpu             3352 arch/s390/kvm/kvm-s390.c 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
vcpu             3355 arch/s390/kvm/kvm-s390.c 		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
vcpu             3358 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3362 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             3364 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3370 arch/s390/kvm/kvm-s390.c 				 (__vector128 *) vcpu->run->s.regs.vrs);
vcpu             3372 arch/s390/kvm/kvm-s390.c 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
vcpu             3373 arch/s390/kvm/kvm-s390.c 	fpu->fpc = vcpu->run->s.regs.fpc;
vcpu             3375 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3379 arch/s390/kvm/kvm-s390.c static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
vcpu             3383 arch/s390/kvm/kvm-s390.c 	if (!is_vcpu_stopped(vcpu))
vcpu             3386 arch/s390/kvm/kvm-s390.c 		vcpu->run->psw_mask = psw.mask;
vcpu             3387 arch/s390/kvm/kvm-s390.c 		vcpu->run->psw_addr = psw.addr;
vcpu             3392 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu             3402 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu             3407 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3409 arch/s390/kvm/kvm-s390.c 	vcpu->guest_debug = 0;
vcpu             3410 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_bp_data(vcpu);
vcpu             3422 arch/s390/kvm/kvm-s390.c 		vcpu->guest_debug = dbg->control;
vcpu             3424 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
vcpu             3427 arch/s390/kvm/kvm-s390.c 			rc = kvm_s390_import_bp_data(vcpu, dbg);
vcpu             3429 arch/s390/kvm/kvm-s390.c 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
vcpu             3430 arch/s390/kvm/kvm-s390.c 		vcpu->arch.guestdbg.last_bp = 0;
vcpu             3434 arch/s390/kvm/kvm-s390.c 		vcpu->guest_debug = 0;
vcpu             3435 arch/s390/kvm/kvm-s390.c 		kvm_s390_clear_bp_data(vcpu);
vcpu             3436 arch/s390/kvm/kvm-s390.c 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
vcpu             3440 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3444 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu             3449 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3452 arch/s390/kvm/kvm-s390.c 	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
vcpu             3455 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3459 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu             3464 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             3467 arch/s390/kvm/kvm-s390.c 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
vcpu             3471 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_stop(vcpu);
vcpu             3474 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_start(vcpu);
vcpu             3483 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             3487 arch/s390/kvm/kvm-s390.c static bool ibs_enabled(struct kvm_vcpu *vcpu)
vcpu             3489 arch/s390/kvm/kvm-s390.c 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
vcpu             3492 arch/s390/kvm/kvm-s390.c static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
vcpu             3495 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_request_handled(vcpu);
vcpu             3496 arch/s390/kvm/kvm-s390.c 	if (!kvm_request_pending(vcpu))
vcpu             3505 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
vcpu             3507 arch/s390/kvm/kvm-s390.c 		rc = gmap_mprotect_notify(vcpu->arch.gmap,
vcpu             3508 arch/s390/kvm/kvm-s390.c 					  kvm_s390_get_prefix(vcpu),
vcpu             3511 arch/s390/kvm/kvm-s390.c 			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
vcpu             3517 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
vcpu             3518 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ihcpu = 0xffff;
vcpu             3522 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
vcpu             3523 arch/s390/kvm/kvm-s390.c 		if (!ibs_enabled(vcpu)) {
vcpu             3524 arch/s390/kvm/kvm-s390.c 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
vcpu             3525 arch/s390/kvm/kvm-s390.c 			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
vcpu             3530 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
vcpu             3531 arch/s390/kvm/kvm-s390.c 		if (ibs_enabled(vcpu)) {
vcpu             3532 arch/s390/kvm/kvm-s390.c 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
vcpu             3533 arch/s390/kvm/kvm-s390.c 			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
vcpu             3538 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
vcpu             3539 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
vcpu             3543 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
vcpu             3549 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
vcpu             3553 arch/s390/kvm/kvm-s390.c 	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
vcpu             3558 arch/s390/kvm/kvm-s390.c 		if ((vcpu->kvm->arch.use_cmma) &&
vcpu             3559 arch/s390/kvm/kvm-s390.c 		    (vcpu->kvm->mm->context.uses_cmm))
vcpu             3560 arch/s390/kvm/kvm-s390.c 			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
vcpu             3565 arch/s390/kvm/kvm-s390.c 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu             3567 arch/s390/kvm/kvm-s390.c 	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
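kvm_s390_handle_requests() drains bits that were posted with kvm_make_request(); each kvm_check_request() consumes its bit exactly once. The toy below models that test-and-clear behaviour with C11 atomics; the TOY_* request numbers are invented and the real helpers also take care of memory ordering.

/* Toy of the make/check request pattern (illustration only). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_REQ_TLB_FLUSH  0	/* hypothetical request numbers */
#define TOY_REQ_ENABLE_IBS 1

static atomic_ulong requests;

static void toy_make_request(int req)
{
	atomic_fetch_or(&requests, 1UL << req);
}

static bool toy_check_request(int req)
{
	unsigned long old = atomic_fetch_and(&requests, ~(1UL << req));

	return old & (1UL << req);	/* true only once per posted request */
}

int main(void)
{
	toy_make_request(TOY_REQ_TLB_FLUSH);
	printf("tlb flush pending: %d\n", toy_check_request(TOY_REQ_TLB_FLUSH));	/* 1 */
	printf("tlb flush pending: %d\n", toy_check_request(TOY_REQ_TLB_FLUSH));	/* 0 */
	printf("ibs pending:       %d\n", toy_check_request(TOY_REQ_ENABLE_IBS));	/* 0 */
	return 0;
}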
vcpu             3575 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             3593 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             3594 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
vcpu             3595 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
vcpu             3613 arch/s390/kvm/kvm-s390.c long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
vcpu             3615 arch/s390/kvm/kvm-s390.c 	return gmap_fault(vcpu->arch.gmap, gpa,
vcpu             3619 arch/s390/kvm/kvm-s390.c static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
vcpu             3628 arch/s390/kvm/kvm-s390.c 		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
vcpu             3632 arch/s390/kvm/kvm-s390.c 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
vcpu             3636 arch/s390/kvm/kvm-s390.c void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
vcpu             3639 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
vcpu             3640 arch/s390/kvm/kvm-s390.c 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
vcpu             3643 arch/s390/kvm/kvm-s390.c void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu             3646 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
vcpu             3647 arch/s390/kvm/kvm-s390.c 	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
vcpu             3650 arch/s390/kvm/kvm-s390.c void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
vcpu             3656 arch/s390/kvm/kvm-s390.c bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
vcpu             3665 arch/s390/kvm/kvm-s390.c static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
vcpu             3671 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu             3673 arch/s390/kvm/kvm-s390.c 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
vcpu             3674 arch/s390/kvm/kvm-s390.c 	    vcpu->arch.pfault_compare)
vcpu             3676 arch/s390/kvm/kvm-s390.c 	if (psw_extint_disabled(vcpu))
vcpu             3678 arch/s390/kvm/kvm-s390.c 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
vcpu             3680 arch/s390/kvm/kvm-s390.c 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
vcpu             3682 arch/s390/kvm/kvm-s390.c 	if (!vcpu->arch.gmap->pfault_enabled)
vcpu             3685 arch/s390/kvm/kvm-s390.c 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
vcpu             3687 arch/s390/kvm/kvm-s390.c 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
vcpu             3690 arch/s390/kvm/kvm-s390.c 	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
vcpu             3694 arch/s390/kvm/kvm-s390.c static int vcpu_pre_run(struct kvm_vcpu *vcpu)
vcpu             3703 arch/s390/kvm/kvm-s390.c 	kvm_check_async_pf_completion(vcpu);
vcpu             3705 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
vcpu             3706 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
vcpu             3714 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu             3715 arch/s390/kvm/kvm-s390.c 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
vcpu             3720 arch/s390/kvm/kvm-s390.c 	rc = kvm_s390_handle_requests(vcpu);
vcpu             3724 arch/s390/kvm/kvm-s390.c 	if (guestdbg_enabled(vcpu)) {
vcpu             3725 arch/s390/kvm/kvm-s390.c 		kvm_s390_backup_guest_per_regs(vcpu);
vcpu             3726 arch/s390/kvm/kvm-s390.c 		kvm_s390_patch_guest_per_regs(vcpu);
vcpu             3729 arch/s390/kvm/kvm-s390.c 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu             3731 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->icptcode = 0;
vcpu             3732 arch/s390/kvm/kvm-s390.c 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
vcpu             3733 arch/s390/kvm/kvm-s390.c 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
vcpu             3734 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_sie_enter(vcpu, cpuflags);
vcpu             3739 arch/s390/kvm/kvm-s390.c static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
vcpu             3747 arch/s390/kvm/kvm-s390.c 	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
vcpu             3748 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_sie_fault(vcpu);
vcpu             3758 arch/s390/kvm/kvm-s390.c 	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
vcpu             3767 arch/s390/kvm/kvm-s390.c 		pgm_info = vcpu->arch.pgm;
vcpu             3771 arch/s390/kvm/kvm-s390.c 	kvm_s390_forward_psw(vcpu, ilen);
vcpu             3772 arch/s390/kvm/kvm-s390.c 	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
vcpu             3775 arch/s390/kvm/kvm-s390.c static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
vcpu             3780 arch/s390/kvm/kvm-s390.c 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu             3781 arch/s390/kvm/kvm-s390.c 		   vcpu->arch.sie_block->icptcode);
vcpu             3782 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
vcpu             3784 arch/s390/kvm/kvm-s390.c 	if (guestdbg_enabled(vcpu))
vcpu             3785 arch/s390/kvm/kvm-s390.c 		kvm_s390_restore_guest_per_regs(vcpu);
vcpu             3787 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
vcpu             3788 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
vcpu             3791 arch/s390/kvm/kvm-s390.c 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
vcpu             3792 arch/s390/kvm/kvm-s390.c 		sie_page = container_of(vcpu->arch.sie_block,
vcpu             3795 arch/s390/kvm/kvm-s390.c 		kvm_s390_reinject_machine_check(vcpu, mcck_info);
vcpu             3799 arch/s390/kvm/kvm-s390.c 	if (vcpu->arch.sie_block->icptcode > 0) {
vcpu             3800 arch/s390/kvm/kvm-s390.c 		int rc = kvm_handle_sie_intercept(vcpu);
vcpu             3804 arch/s390/kvm/kvm-s390.c 		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
vcpu             3805 arch/s390/kvm/kvm-s390.c 		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
vcpu             3806 arch/s390/kvm/kvm-s390.c 		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
vcpu             3807 arch/s390/kvm/kvm-s390.c 		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
vcpu             3810 arch/s390/kvm/kvm-s390.c 		vcpu->stat.exit_null++;
vcpu             3812 arch/s390/kvm/kvm-s390.c 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
vcpu             3813 arch/s390/kvm/kvm-s390.c 		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
vcpu             3814 arch/s390/kvm/kvm-s390.c 		vcpu->run->s390_ucontrol.trans_exc_code =
vcpu             3816 arch/s390/kvm/kvm-s390.c 		vcpu->run->s390_ucontrol.pgm_code = 0x10;
vcpu             3819 arch/s390/kvm/kvm-s390.c 		trace_kvm_s390_major_guest_pfault(vcpu);
vcpu             3821 arch/s390/kvm/kvm-s390.c 		if (kvm_arch_setup_async_pf(vcpu))
vcpu             3823 arch/s390/kvm/kvm-s390.c 		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
vcpu             3825 arch/s390/kvm/kvm-s390.c 	return vcpu_post_run_fault_in_sie(vcpu);
vcpu             3828 arch/s390/kvm/kvm-s390.c static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu             3836 arch/s390/kvm/kvm-s390.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             3839 arch/s390/kvm/kvm-s390.c 		rc = vcpu_pre_run(vcpu);
vcpu             3843 arch/s390/kvm/kvm-s390.c 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             3850 arch/s390/kvm/kvm-s390.c 		__disable_cpu_timer_accounting(vcpu);
vcpu             3852 arch/s390/kvm/kvm-s390.c 		exit_reason = sie64a(vcpu->arch.sie_block,
vcpu             3853 arch/s390/kvm/kvm-s390.c 				     vcpu->run->s.regs.gprs);
vcpu             3855 arch/s390/kvm/kvm-s390.c 		__enable_cpu_timer_accounting(vcpu);
vcpu             3858 arch/s390/kvm/kvm-s390.c 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             3860 arch/s390/kvm/kvm-s390.c 		rc = vcpu_post_run(vcpu, exit_reason);
vcpu             3861 arch/s390/kvm/kvm-s390.c 	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
vcpu             3863 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             3867 arch/s390/kvm/kvm-s390.c static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu             3874 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
vcpu             3875 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
vcpu             3877 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
vcpu             3879 arch/s390/kvm/kvm-s390.c 		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
vcpu             3881 arch/s390/kvm/kvm-s390.c 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             3884 arch/s390/kvm/kvm-s390.c 		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
vcpu             3885 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
vcpu             3886 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
vcpu             3887 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
vcpu             3888 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
vcpu             3891 arch/s390/kvm/kvm-s390.c 		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
vcpu             3892 arch/s390/kvm/kvm-s390.c 		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
vcpu             3893 arch/s390/kvm/kvm-s390.c 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
vcpu             3894 arch/s390/kvm/kvm-s390.c 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
vcpu             3895 arch/s390/kvm/kvm-s390.c 			kvm_clear_async_pf_completion_queue(vcpu);
vcpu             3902 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 64) &&
vcpu             3904 arch/s390/kvm/kvm-s390.c 	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
vcpu             3905 arch/s390/kvm/kvm-s390.c 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
vcpu             3906 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
vcpu             3913 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 133) &&
vcpu             3915 arch/s390/kvm/kvm-s390.c 	    !vcpu->arch.gs_enabled) {
vcpu             3916 arch/s390/kvm/kvm-s390.c 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
vcpu             3917 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecb |= ECB_GS;
vcpu             3918 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu             3919 arch/s390/kvm/kvm-s390.c 		vcpu->arch.gs_enabled = 1;
vcpu             3922 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 82)) {
vcpu             3923 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu             3924 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
vcpu             3926 arch/s390/kvm/kvm-s390.c 	save_access_regs(vcpu->arch.host_acrs);
vcpu             3927 arch/s390/kvm/kvm-s390.c 	restore_access_regs(vcpu->run->s.regs.acrs);
vcpu             3930 arch/s390/kvm/kvm-s390.c 	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
vcpu             3931 arch/s390/kvm/kvm-s390.c 	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
vcpu             3933 arch/s390/kvm/kvm-s390.c 		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
vcpu             3935 arch/s390/kvm/kvm-s390.c 		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
vcpu             3936 arch/s390/kvm/kvm-s390.c 	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
vcpu             3944 arch/s390/kvm/kvm-s390.c 			vcpu->arch.host_gscb = current->thread.gs_cb;
vcpu             3945 arch/s390/kvm/kvm-s390.c 			save_gs_cb(vcpu->arch.host_gscb);
vcpu             3947 arch/s390/kvm/kvm-s390.c 		if (vcpu->arch.gs_enabled) {
vcpu             3949 arch/s390/kvm/kvm-s390.c 						&vcpu->run->s.regs.gscb;
vcpu             3959 arch/s390/kvm/kvm-s390.c static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu             3961 arch/s390/kvm/kvm-s390.c 	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
vcpu             3962 arch/s390/kvm/kvm-s390.c 	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
vcpu             3963 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
vcpu             3964 arch/s390/kvm/kvm-s390.c 	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
vcpu             3965 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
vcpu             3966 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
vcpu             3967 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
vcpu             3968 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
vcpu             3969 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
vcpu             3970 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
vcpu             3971 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
vcpu             3972 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
vcpu             3973 arch/s390/kvm/kvm-s390.c 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
vcpu             3974 arch/s390/kvm/kvm-s390.c 	save_access_regs(vcpu->run->s.regs.acrs);
vcpu             3975 arch/s390/kvm/kvm-s390.c 	restore_access_regs(vcpu->arch.host_acrs);
vcpu             3978 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
vcpu             3980 arch/s390/kvm/kvm-s390.c 	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
vcpu             3981 arch/s390/kvm/kvm-s390.c 	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
vcpu             3984 arch/s390/kvm/kvm-s390.c 		if (vcpu->arch.gs_enabled)
vcpu             3987 arch/s390/kvm/kvm-s390.c 		current->thread.gs_cb = vcpu->arch.host_gscb;
vcpu             3988 arch/s390/kvm/kvm-s390.c 		restore_gs_cb(vcpu->arch.host_gscb);
vcpu             3990 arch/s390/kvm/kvm-s390.c 		if (!vcpu->arch.host_gscb)
vcpu             3992 arch/s390/kvm/kvm-s390.c 		vcpu->arch.host_gscb = NULL;
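sync_regs()/store_regs() shuttle state through the synced register block in struct kvm_run, so userspace can avoid extra KVM_SET_REGS round trips. A hedged sketch follows; it assumes the s390 uapi definitions of KVM_SYNC_GPRS and the s.regs layout, and that "run" points at the mmap'ed kvm_run area of an s390 vCPU.

/* Sketch: hand an updated GPR to the kernel via the synced register block. */
#include <linux/kvm.h>
#include <stdint.h>

static void set_gpr2_via_sync_regs(struct kvm_run *run, uint64_t value)
{
	run->s.regs.gprs[2] = value;
	/* tell sync_regs() which parts of s.regs are valid on the next entry */
	run->kvm_dirty_regs |= KVM_SYNC_GPRS;
}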
vcpu             3997 arch/s390/kvm/kvm-s390.c int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu             4008 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             4010 arch/s390/kvm/kvm-s390.c 	if (guestdbg_exit_pending(vcpu)) {
vcpu             4011 arch/s390/kvm/kvm-s390.c 		kvm_s390_prepare_debug_exit(vcpu);
vcpu             4016 arch/s390/kvm/kvm-s390.c 	kvm_sigset_activate(vcpu);
vcpu             4018 arch/s390/kvm/kvm-s390.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
vcpu             4019 arch/s390/kvm/kvm-s390.c 		kvm_s390_vcpu_start(vcpu);
vcpu             4020 arch/s390/kvm/kvm-s390.c 	} else if (is_vcpu_stopped(vcpu)) {
vcpu             4022 arch/s390/kvm/kvm-s390.c 				   vcpu->vcpu_id);
vcpu             4027 arch/s390/kvm/kvm-s390.c 	sync_regs(vcpu, kvm_run);
vcpu             4028 arch/s390/kvm/kvm-s390.c 	enable_cpu_timer_accounting(vcpu);
vcpu             4031 arch/s390/kvm/kvm-s390.c 	rc = __vcpu_run(vcpu);
vcpu             4038 arch/s390/kvm/kvm-s390.c 	if (guestdbg_exit_pending(vcpu) && !rc)  {
vcpu             4039 arch/s390/kvm/kvm-s390.c 		kvm_s390_prepare_debug_exit(vcpu);
vcpu             4048 arch/s390/kvm/kvm-s390.c 	disable_cpu_timer_accounting(vcpu);
vcpu             4049 arch/s390/kvm/kvm-s390.c 	store_regs(vcpu, kvm_run);
vcpu             4051 arch/s390/kvm/kvm-s390.c 	kvm_sigset_deactivate(vcpu);
vcpu             4053 arch/s390/kvm/kvm-s390.c 	vcpu->stat.exit_userspace++;
vcpu             4055 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
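kvm_arch_vcpu_ioctl_run() is the kernel end of the KVM_RUN ioctl. A hedged userspace sketch of the matching run loop is below; vcpu_fd and the mmap'ed kvm_run structure are assumed to exist already, and only the s390 SIE intercept exit is handled.

/* Sketch: one KVM_RUN iteration and dispatch on exit_reason. */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	switch (run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:	/* intercept left for userspace, filled in vcpu_post_run() */
		printf("icptcode %u ipa %#x ipb %#x\n",
		       (unsigned)run->s390_sieic.icptcode,
		       (unsigned)run->s390_sieic.ipa,
		       (unsigned)run->s390_sieic.ipb);
		break;
	case KVM_EXIT_INTR:		/* interrupted by a signal */
		break;
	default:
		printf("unhandled exit %u\n", run->exit_reason);
		break;
	}
	return 0;
}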
vcpu             4065 arch/s390/kvm/kvm-s390.c int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
vcpu             4073 arch/s390/kvm/kvm-s390.c 	px = kvm_s390_get_prefix(vcpu);
vcpu             4075 arch/s390/kvm/kvm-s390.c 		if (write_guest_abs(vcpu, 163, &archmode, 1))
vcpu             4079 arch/s390/kvm/kvm-s390.c 		if (write_guest_real(vcpu, 163, &archmode, 1))
vcpu             4087 arch/s390/kvm/kvm-s390.c 		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
vcpu             4088 arch/s390/kvm/kvm-s390.c 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
vcpu             4091 arch/s390/kvm/kvm-s390.c 		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
vcpu             4092 arch/s390/kvm/kvm-s390.c 				     vcpu->run->s.regs.fprs, 128);
vcpu             4094 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
vcpu             4095 arch/s390/kvm/kvm-s390.c 			      vcpu->run->s.regs.gprs, 128);
vcpu             4096 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
vcpu             4097 arch/s390/kvm/kvm-s390.c 			      &vcpu->arch.sie_block->gpsw, 16);
vcpu             4098 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
vcpu             4100 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
vcpu             4101 arch/s390/kvm/kvm-s390.c 			      &vcpu->run->s.regs.fpc, 4);
vcpu             4102 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
vcpu             4103 arch/s390/kvm/kvm-s390.c 			      &vcpu->arch.sie_block->todpr, 4);
vcpu             4104 arch/s390/kvm/kvm-s390.c 	cputm = kvm_s390_get_cpu_timer(vcpu);
vcpu             4105 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
vcpu             4107 arch/s390/kvm/kvm-s390.c 	clkcomp = vcpu->arch.sie_block->ckc >> 8;
vcpu             4108 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
vcpu             4110 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
vcpu             4111 arch/s390/kvm/kvm-s390.c 			      &vcpu->run->s.regs.acrs, 64);
vcpu             4112 arch/s390/kvm/kvm-s390.c 	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
vcpu             4113 arch/s390/kvm/kvm-s390.c 			      &vcpu->arch.sie_block->gcr, 128);
vcpu             4117 arch/s390/kvm/kvm-s390.c int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu             4125 arch/s390/kvm/kvm-s390.c 	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
vcpu             4126 arch/s390/kvm/kvm-s390.c 	save_access_regs(vcpu->run->s.regs.acrs);
vcpu             4128 arch/s390/kvm/kvm-s390.c 	return kvm_s390_store_status_unloaded(vcpu, addr);
vcpu             4131 arch/s390/kvm/kvm-s390.c static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
vcpu             4133 arch/s390/kvm/kvm-s390.c 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
vcpu             4134 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
vcpu             4140 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu;
vcpu             4142 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             4143 arch/s390/kvm/kvm-s390.c 		__disable_ibs_on_vcpu(vcpu);
vcpu             4147 arch/s390/kvm/kvm-s390.c static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
vcpu             4151 arch/s390/kvm/kvm-s390.c 	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
vcpu             4152 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
vcpu             4155 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
vcpu             4159 arch/s390/kvm/kvm-s390.c 	if (!is_vcpu_stopped(vcpu))
vcpu             4162 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
vcpu             4164 arch/s390/kvm/kvm-s390.c 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
vcpu             4165 arch/s390/kvm/kvm-s390.c 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
vcpu             4168 arch/s390/kvm/kvm-s390.c 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
vcpu             4174 arch/s390/kvm/kvm-s390.c 		__enable_ibs_on_vcpu(vcpu);
vcpu             4181 arch/s390/kvm/kvm-s390.c 		__disable_ibs_on_all_vcpus(vcpu->kvm);
vcpu             4184 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
vcpu             4189 arch/s390/kvm/kvm-s390.c 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             4190 arch/s390/kvm/kvm-s390.c 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
vcpu             4194 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
vcpu             4199 arch/s390/kvm/kvm-s390.c 	if (is_vcpu_stopped(vcpu))
vcpu             4202 arch/s390/kvm/kvm-s390.c 	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
vcpu             4204 arch/s390/kvm/kvm-s390.c 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
vcpu             4205 arch/s390/kvm/kvm-s390.c 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
vcpu             4208 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_stop_irq(vcpu);
vcpu             4210 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
vcpu             4211 arch/s390/kvm/kvm-s390.c 	__disable_ibs_on_vcpu(vcpu);
vcpu             4214 arch/s390/kvm/kvm-s390.c 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
vcpu             4216 arch/s390/kvm/kvm-s390.c 			started_vcpu = vcpu->kvm->vcpus[i];
vcpu             4228 arch/s390/kvm/kvm-s390.c 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
vcpu             4232 arch/s390/kvm/kvm-s390.c static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
vcpu             4242 arch/s390/kvm/kvm-s390.c 		if (!vcpu->kvm->arch.css_support) {
vcpu             4243 arch/s390/kvm/kvm-s390.c 			vcpu->kvm->arch.css_support = 1;
vcpu             4244 arch/s390/kvm/kvm-s390.c 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
vcpu             4245 arch/s390/kvm/kvm-s390.c 			trace_kvm_s390_enable_css(vcpu->kvm);
vcpu             4256 arch/s390/kvm/kvm-s390.c static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
vcpu             4277 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4282 arch/s390/kvm/kvm-s390.c 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
vcpu             4286 arch/s390/kvm/kvm-s390.c 		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
vcpu             4294 arch/s390/kvm/kvm-s390.c 			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
vcpu             4302 arch/s390/kvm/kvm-s390.c 		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
vcpu             4308 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu             4311 arch/s390/kvm/kvm-s390.c 		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
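kvm_s390_guest_mem_op() backs the KVM_S390_MEM_OP vCPU ioctl. A hedged userspace sketch of a logical-address read follows; it assumes the struct kvm_s390_mem_op layout from the uapi headers and an existing vcpu_fd, and it leaves the flags field at zero.

/* Sketch: read guest memory through the logical-address path. */
#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf,
			      uint32_t size, uint8_t ar)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size  = size;
	op.op    = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf   = (uint64_t)(unsigned long)buf;
	op.ar    = ar;		/* access register number, as in mop->ar above */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}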
vcpu             4320 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             4329 arch/s390/kvm/kvm-s390.c 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
vcpu             4339 arch/s390/kvm/kvm-s390.c 		return kvm_s390_inject_vcpu(vcpu, &s390irq);
vcpu             4348 arch/s390/kvm/kvm-s390.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             4353 arch/s390/kvm/kvm-s390.c 	vcpu_load(vcpu);
vcpu             4357 arch/s390/kvm/kvm-s390.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4358 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_store_status_unloaded(vcpu, arg);
vcpu             4359 arch/s390/kvm/kvm-s390.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             4367 arch/s390/kvm/kvm-s390.c 		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
vcpu             4371 arch/s390/kvm/kvm-s390.c 		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
vcpu             4380 arch/s390/kvm/kvm-s390.c 			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
vcpu             4382 arch/s390/kvm/kvm-s390.c 			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
vcpu             4394 arch/s390/kvm/kvm-s390.c 		if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu             4399 arch/s390/kvm/kvm-s390.c 		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
vcpu             4411 arch/s390/kvm/kvm-s390.c 		if (!kvm_is_ucontrol(vcpu->kvm)) {
vcpu             4416 arch/s390/kvm/kvm-s390.c 		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
vcpu             4422 arch/s390/kvm/kvm-s390.c 		r = gmap_fault(vcpu->arch.gmap, arg, 0);
vcpu             4431 arch/s390/kvm/kvm-s390.c 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu             4438 arch/s390/kvm/kvm-s390.c 			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
vcpu             4456 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_set_irq_state(vcpu,
vcpu             4472 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_get_irq_state(vcpu,
vcpu             4481 arch/s390/kvm/kvm-s390.c 	vcpu_put(vcpu);
vcpu             4485 arch/s390/kvm/kvm-s390.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vcpu             4489 arch/s390/kvm/kvm-s390.c 		 && (kvm_is_ucontrol(vcpu->kvm))) {
vcpu             4490 arch/s390/kvm/kvm-s390.c 		vmf->page = virt_to_page(vcpu->arch.sie_block);
vcpu             4567 arch/s390/kvm/kvm-s390.c void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
vcpu             4569 arch/s390/kvm/kvm-s390.c 	vcpu->valid_wakeup = false;
vcpu               23 arch/s390/kvm/kvm-s390.h #define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
vcpu               25 arch/s390/kvm/kvm-s390.h #define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
vcpu               48 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
vcpu               50 arch/s390/kvm/kvm-s390.h 	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
vcpu               53 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
vcpu               55 arch/s390/kvm/kvm-s390.h 	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
vcpu               58 arch/s390/kvm/kvm-s390.h static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
vcpu               60 arch/s390/kvm/kvm-s390.h 	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
vcpu               63 arch/s390/kvm/kvm-s390.h static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
vcpu               65 arch/s390/kvm/kvm-s390.h 	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
vcpu               68 arch/s390/kvm/kvm-s390.h static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
vcpu               70 arch/s390/kvm/kvm-s390.h 	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
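One subtlety of kvm_s390_test_cpuflags() is that it answers "are all requested bits set", not "is any bit set". The tiny toy below makes that explicit; the TOY_CPUSTAT_* values are invented for illustration.

/* Toy of the all-bits-set test used by kvm_s390_test_cpuflags(). */
#include <stdio.h>

#define TOY_CPUSTAT_STOPPED 0x1u
#define TOY_CPUSTAT_IBS     0x2u

static int test_cpuflags(unsigned int cpuflags, unsigned int flags)
{
	return (cpuflags & flags) == flags;
}

int main(void)
{
	unsigned int cpuflags = TOY_CPUSTAT_STOPPED;

	printf("%d\n", test_cpuflags(cpuflags, TOY_CPUSTAT_STOPPED));				/* 1 */
	printf("%d\n", test_cpuflags(cpuflags, TOY_CPUSTAT_STOPPED | TOY_CPUSTAT_IBS));	/* 0 */
	return 0;
}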
vcpu               85 arch/s390/kvm/kvm-s390.h static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
vcpu               87 arch/s390/kvm/kvm-s390.h 	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
vcpu               90 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
vcpu               92 arch/s390/kvm/kvm-s390.h 	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
vcpu               94 arch/s390/kvm/kvm-s390.h 	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
vcpu               95 arch/s390/kvm/kvm-s390.h 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu               96 arch/s390/kvm/kvm-s390.h 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
vcpu               99 arch/s390/kvm/kvm-s390.h static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
vcpu              101 arch/s390/kvm/kvm-s390.h 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
vcpu              102 arch/s390/kvm/kvm-s390.h 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
vcpu              107 arch/s390/kvm/kvm-s390.h 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
vcpu              110 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
vcpu              114 arch/s390/kvm/kvm-s390.h 	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
vcpu              115 arch/s390/kvm/kvm-s390.h 	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
vcpu              116 arch/s390/kvm/kvm-s390.h 	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
vcpu              117 arch/s390/kvm/kvm-s390.h 	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
vcpu              119 arch/s390/kvm/kvm-s390.h 	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
vcpu              120 arch/s390/kvm/kvm-s390.h 	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
vcpu              128 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
vcpu              131 arch/s390/kvm/kvm-s390.h 		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
vcpu              133 arch/s390/kvm/kvm-s390.h 		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
vcpu              136 arch/s390/kvm/kvm-s390.h static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
vcpu              138 arch/s390/kvm/kvm-s390.h 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
vcpu              139 arch/s390/kvm/kvm-s390.h 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
vcpu              140 arch/s390/kvm/kvm-s390.h 			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
vcpu              148 arch/s390/kvm/kvm-s390.h 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
vcpu              151 arch/s390/kvm/kvm-s390.h static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
vcpu              153 arch/s390/kvm/kvm-s390.h 	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
vcpu              154 arch/s390/kvm/kvm-s390.h 	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
vcpu              159 arch/s390/kvm/kvm-s390.h 	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
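The base/displacement helpers above decode operand addresses from the intercepted instruction's IPB word: the top nibble selects the base register (0 meaning "no base") and the next 12 bits carry the displacement; the RSY variant additionally folds in the high displacement byte to form a signed 20-bit value. A small standalone sketch of the simple S-format case, with a plain gprs array standing in for vcpu->run->s.regs.gprs, is shown below.

/* Sketch mirroring kvm_s390_get_base_disp_s(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t base_disp_s(uint32_t ipb, const uint64_t gprs[16])
{
	uint32_t base2 = ipb >> 28;			/* B2 field */
	uint32_t disp2 = (ipb & 0x0fff0000) >> 16;	/* D2 field, 12 bits */

	/* base register number 0 means "no base", not general register 0 */
	return (base2 ? gprs[base2] : 0) + disp2;
}

int main(void)
{
	uint64_t gprs[16] = { 0 };

	gprs[5] = 0x2000;
	/* B2 = 5, D2 = 0x010 -> effective address 0x2010 */
	printf("%#llx\n", (unsigned long long)base_disp_s(0x50100000, gprs));
	return 0;
}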
vcpu              163 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
vcpu              165 arch/s390/kvm/kvm-s390.h 	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
vcpu              166 arch/s390/kvm/kvm-s390.h 	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
vcpu              200 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
vcpu              201 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
vcpu              203 arch/s390/kvm/kvm-s390.h int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
vcpu              204 arch/s390/kvm/kvm-s390.h void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
vcpu              208 arch/s390/kvm/kvm-s390.h int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
vcpu              210 arch/s390/kvm/kvm-s390.h static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
vcpu              218 arch/s390/kvm/kvm-s390.h 	return kvm_s390_inject_vcpu(vcpu, &irq);
vcpu              220 arch/s390/kvm/kvm-s390.h static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
vcpu              227 arch/s390/kvm/kvm-s390.h 	return kvm_s390_inject_vcpu(vcpu, &irq);
vcpu              236 arch/s390/kvm/kvm-s390.h u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
vcpu              237 arch/s390/kvm/kvm-s390.h int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
vcpu              238 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
vcpu              240 arch/s390/kvm/kvm-s390.h 	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
vcpu              244 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
vcpu              246 arch/s390/kvm/kvm-s390.h 	kvm_s390_rewind_psw(vcpu, -ilen);
vcpu              248 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
vcpu              251 arch/s390/kvm/kvm-s390.h 	vcpu->arch.sie_block->icptstatus &= ~0x02;
vcpu              252 arch/s390/kvm/kvm-s390.h 	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
vcpu              255 arch/s390/kvm/kvm-s390.h int handle_sthyi(struct kvm_vcpu *vcpu);
vcpu              259 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
vcpu              260 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
vcpu              261 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
vcpu              262 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
vcpu              263 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
vcpu              264 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
vcpu              265 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
vcpu              266 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
vcpu              267 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
vcpu              268 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
vcpu              269 arch/s390/kvm/kvm-s390.h int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);
vcpu              272 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
vcpu              273 arch/s390/kvm/kvm-s390.h void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
vcpu              280 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
vcpu              281 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
vcpu              286 arch/s390/kvm/kvm-s390.h long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
vcpu              287 arch/s390/kvm/kvm-s390.h int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu              288 arch/s390/kvm/kvm-s390.h int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
vcpu              289 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
vcpu              290 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
vcpu              291 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
vcpu              292 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
vcpu              293 arch/s390/kvm/kvm-s390.h bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
vcpu              294 arch/s390/kvm/kvm-s390.h void exit_sie(struct kvm_vcpu *vcpu);
vcpu              295 arch/s390/kvm/kvm-s390.h void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
vcpu              296 arch/s390/kvm/kvm-s390.h int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
vcpu              297 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
vcpu              298 arch/s390/kvm/kvm-s390.h void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
vcpu              299 arch/s390/kvm/kvm-s390.h __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
vcpu              302 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
vcpu              307 arch/s390/kvm/kvm-s390.h 	struct kvm_vcpu *vcpu;
vcpu              310 arch/s390/kvm/kvm-s390.h 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              311 arch/s390/kvm/kvm-s390.h 		kvm_s390_vcpu_block(vcpu);
vcpu              317 arch/s390/kvm/kvm-s390.h 	struct kvm_vcpu *vcpu;
vcpu              319 arch/s390/kvm/kvm-s390.h 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              320 arch/s390/kvm/kvm-s390.h 		kvm_s390_vcpu_unblock(vcpu);
vcpu              359 arch/s390/kvm/kvm-s390.h static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
vcpu              363 arch/s390/kvm/kvm-s390.h 	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
vcpu              370 arch/s390/kvm/kvm-s390.h int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
vcpu              371 arch/s390/kvm/kvm-s390.h int psw_extint_disabled(struct kvm_vcpu *vcpu);
vcpu              373 arch/s390/kvm/kvm-s390.h int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
vcpu              375 arch/s390/kvm/kvm-s390.h int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
vcpu              376 arch/s390/kvm/kvm-s390.h void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
vcpu              377 arch/s390/kvm/kvm-s390.h int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
vcpu              379 arch/s390/kvm/kvm-s390.h int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
vcpu              388 arch/s390/kvm/kvm-s390.h void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
vcpu              389 arch/s390/kvm/kvm-s390.h void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
vcpu              390 arch/s390/kvm/kvm-s390.h void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
vcpu              391 arch/s390/kvm/kvm-s390.h int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
vcpu              393 arch/s390/kvm/kvm-s390.h void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
vcpu              394 arch/s390/kvm/kvm-s390.h void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
vcpu              395 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
vcpu              396 arch/s390/kvm/kvm-s390.h int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
vcpu              414 arch/s390/kvm/kvm-s390.h void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
vcpu               35 arch/s390/kvm/priv.c static int handle_ri(struct kvm_vcpu *vcpu)
vcpu               37 arch/s390/kvm/priv.c 	vcpu->stat.instruction_ri++;
vcpu               39 arch/s390/kvm/priv.c 	if (test_kvm_facility(vcpu->kvm, 64)) {
vcpu               40 arch/s390/kvm/priv.c 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
vcpu               41 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
vcpu               42 arch/s390/kvm/priv.c 		kvm_s390_retry_instr(vcpu);
vcpu               45 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu               48 arch/s390/kvm/priv.c int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
vcpu               50 arch/s390/kvm/priv.c 	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
vcpu               51 arch/s390/kvm/priv.c 		return handle_ri(vcpu);
vcpu               56 arch/s390/kvm/priv.c static int handle_gs(struct kvm_vcpu *vcpu)
vcpu               58 arch/s390/kvm/priv.c 	vcpu->stat.instruction_gs++;
vcpu               60 arch/s390/kvm/priv.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
vcpu               61 arch/s390/kvm/priv.c 		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
vcpu               64 arch/s390/kvm/priv.c 		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
vcpu               67 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ecb |= ECB_GS;
vcpu               68 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
vcpu               69 arch/s390/kvm/priv.c 		vcpu->arch.gs_enabled = 1;
vcpu               70 arch/s390/kvm/priv.c 		kvm_s390_retry_instr(vcpu);
vcpu               73 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu               76 arch/s390/kvm/priv.c int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
vcpu               78 arch/s390/kvm/priv.c 	int code = vcpu->arch.sie_block->ipb & 0xff;
vcpu               81 arch/s390/kvm/priv.c 		return handle_gs(vcpu);
vcpu               86 arch/s390/kvm/priv.c static int handle_set_clock(struct kvm_vcpu *vcpu)
vcpu               93 arch/s390/kvm/priv.c 	vcpu->stat.instruction_sck++;
vcpu               95 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu               96 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu               98 arch/s390/kvm/priv.c 	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              100 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              101 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
vcpu              103 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              105 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
vcpu              106 arch/s390/kvm/priv.c 	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
vcpu              108 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 0);
vcpu              112 arch/s390/kvm/priv.c static int handle_set_prefix(struct kvm_vcpu *vcpu)
vcpu              119 arch/s390/kvm/priv.c 	vcpu->stat.instruction_spx++;
vcpu              121 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              122 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              124 arch/s390/kvm/priv.c 	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              128 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              131 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
vcpu              133 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              142 arch/s390/kvm/priv.c 	if (kvm_is_error_gpa(vcpu->kvm, address))
vcpu              143 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              145 arch/s390/kvm/priv.c 	kvm_s390_set_prefix(vcpu, address);
vcpu              146 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_prefix(vcpu, 1, address);
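handle_set_prefix() installs the 8 KB prefix area that s390 prefixing is built on: the first two pages of real address space and the two pages at the prefix swap places when converted to absolute addresses. The sketch below restates that rule in plain C (the in-kernel helper is kvm_s390_real_to_abs()); the prefix value is just an example.

/* Sketch of the s390 prefixing rule established by SET PREFIX. */
#include <stdint.h>
#include <stdio.h>

#define PREFIX_AREA (2 * 4096UL)	/* two 4 KB pages */

static uint64_t real_to_abs(uint64_t real, uint64_t prefix)
{
	if (real < PREFIX_AREA)
		return real + prefix;			/* low pages map to the prefix area */
	if (real >= prefix && real < prefix + PREFIX_AREA)
		return real - prefix;			/* prefix area maps to the low pages */
	return real;					/* everything else is identity */
}

int main(void)
{
	uint64_t prefix = 0x20000;	/* example prefix, 8 KB aligned */

	printf("%#llx\n", (unsigned long long)real_to_abs(0x1000, prefix));	/* 0x21000 */
	printf("%#llx\n", (unsigned long long)real_to_abs(0x21000, prefix));	/* 0x1000 */
	printf("%#llx\n", (unsigned long long)real_to_abs(0x50000, prefix));	/* unchanged */
	return 0;
}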
vcpu              150 arch/s390/kvm/priv.c static int handle_store_prefix(struct kvm_vcpu *vcpu)
vcpu              157 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stpx++;
vcpu              159 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              160 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              162 arch/s390/kvm/priv.c 	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              166 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              168 arch/s390/kvm/priv.c 	address = kvm_s390_get_prefix(vcpu);
vcpu              171 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
vcpu              173 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              175 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
vcpu              176 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
vcpu              180 arch/s390/kvm/priv.c static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
vcpu              182 arch/s390/kvm/priv.c 	u16 vcpu_id = vcpu->vcpu_id;
vcpu              187 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stap++;
vcpu              189 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              190 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              192 arch/s390/kvm/priv.c 	ga = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              195 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              197 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
vcpu              199 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              201 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
vcpu              202 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_stap(vcpu, ga);
vcpu              206 arch/s390/kvm/priv.c int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
vcpu              210 arch/s390/kvm/priv.c 	trace_kvm_s390_skey_related_inst(vcpu);
vcpu              212 arch/s390/kvm/priv.c 	if (vcpu->arch.skey_enabled)
vcpu              216 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
vcpu              220 arch/s390/kvm/priv.c 	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
vcpu              221 arch/s390/kvm/priv.c 		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
vcpu              222 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.use_skf)
vcpu              223 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
vcpu              225 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
vcpu              226 arch/s390/kvm/priv.c 	vcpu->arch.skey_enabled = true;
vcpu              230 arch/s390/kvm/priv.c static int try_handle_skey(struct kvm_vcpu *vcpu)
vcpu              234 arch/s390/kvm/priv.c 	rc = kvm_s390_skey_check_enable(vcpu);
vcpu              237 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.use_skf) {
vcpu              239 arch/s390/kvm/priv.c 		kvm_s390_retry_instr(vcpu);
vcpu              240 arch/s390/kvm/priv.c 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
vcpu              246 arch/s390/kvm/priv.c static int handle_iske(struct kvm_vcpu *vcpu)
vcpu              254 arch/s390/kvm/priv.c 	vcpu->stat.instruction_iske++;
vcpu              256 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              257 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              259 arch/s390/kvm/priv.c 	rc = try_handle_skey(vcpu);
vcpu              263 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              265 arch/s390/kvm/priv.c 	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
vcpu              266 arch/s390/kvm/priv.c 	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
vcpu              267 arch/s390/kvm/priv.c 	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
vcpu              268 arch/s390/kvm/priv.c 	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
vcpu              270 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              286 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              289 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu              290 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[reg1] |= key;
vcpu              294 arch/s390/kvm/priv.c static int handle_rrbe(struct kvm_vcpu *vcpu)
vcpu              301 arch/s390/kvm/priv.c 	vcpu->stat.instruction_rrbe++;
vcpu              303 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              304 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              306 arch/s390/kvm/priv.c 	rc = try_handle_skey(vcpu);
vcpu              310 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              312 arch/s390/kvm/priv.c 	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
vcpu              313 arch/s390/kvm/priv.c 	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
vcpu              314 arch/s390/kvm/priv.c 	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
vcpu              315 arch/s390/kvm/priv.c 	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
vcpu              317 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              332 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              335 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, rc);
vcpu              343 arch/s390/kvm/priv.c static int handle_sske(struct kvm_vcpu *vcpu)
vcpu              345 arch/s390/kvm/priv.c 	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
vcpu              352 arch/s390/kvm/priv.c 	vcpu->stat.instruction_sske++;
vcpu              354 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              355 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              357 arch/s390/kvm/priv.c 	rc = try_handle_skey(vcpu);
vcpu              361 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 8))
vcpu              363 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 10))
vcpu              365 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 14))
vcpu              368 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              370 arch/s390/kvm/priv.c 	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
vcpu              371 arch/s390/kvm/priv.c 	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
vcpu              372 arch/s390/kvm/priv.c 	start = kvm_s390_logical_to_effective(vcpu, start);
vcpu              377 arch/s390/kvm/priv.c 		start = kvm_s390_real_to_abs(vcpu, start);
vcpu              382 arch/s390/kvm/priv.c 		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
vcpu              386 arch/s390/kvm/priv.c 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              400 arch/s390/kvm/priv.c 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              409 arch/s390/kvm/priv.c 			kvm_s390_set_psw_cc(vcpu, 3);
vcpu              411 arch/s390/kvm/priv.c 			kvm_s390_set_psw_cc(vcpu, rc);
vcpu              412 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
vcpu              413 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
vcpu              417 arch/s390/kvm/priv.c 		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
vcpu              418 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
vcpu              420 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
vcpu              421 arch/s390/kvm/priv.c 		end = kvm_s390_logical_to_effective(vcpu, end);
vcpu              422 arch/s390/kvm/priv.c 		vcpu->run->s.regs.gprs[reg2] |= end;
vcpu              427 arch/s390/kvm/priv.c static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
vcpu              429 arch/s390/kvm/priv.c 	vcpu->stat.instruction_ipte_interlock++;
vcpu              430 arch/s390/kvm/priv.c 	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
vcpu              431 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              432 arch/s390/kvm/priv.c 	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
vcpu              433 arch/s390/kvm/priv.c 	kvm_s390_retry_instr(vcpu);
vcpu              434 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
vcpu              438 arch/s390/kvm/priv.c static int handle_test_block(struct kvm_vcpu *vcpu)
vcpu              443 arch/s390/kvm/priv.c 	vcpu->stat.instruction_tb++;
vcpu              445 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              446 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              448 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
vcpu              449 arch/s390/kvm/priv.c 	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
vcpu              450 arch/s390/kvm/priv.c 	addr = kvm_s390_logical_to_effective(vcpu, addr);
vcpu              451 arch/s390/kvm/priv.c 	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
vcpu              452 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
vcpu              453 arch/s390/kvm/priv.c 	addr = kvm_s390_real_to_abs(vcpu, addr);
vcpu              455 arch/s390/kvm/priv.c 	if (kvm_is_error_gpa(vcpu->kvm, addr))
vcpu              456 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              461 arch/s390/kvm/priv.c 	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
vcpu              463 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 0);
vcpu              464 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[0] = 0;
vcpu              468 arch/s390/kvm/priv.c static int handle_tpi(struct kvm_vcpu *vcpu)
vcpu              477 arch/s390/kvm/priv.c 	vcpu->stat.instruction_tpi++;
vcpu              479 arch/s390/kvm/priv.c 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              481 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              483 arch/s390/kvm/priv.c 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
vcpu              485 arch/s390/kvm/priv.c 		kvm_s390_set_psw_cc(vcpu, 0);
vcpu              498 arch/s390/kvm/priv.c 		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
vcpu              500 arch/s390/kvm/priv.c 			rc = kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              509 arch/s390/kvm/priv.c 		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
vcpu              518 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 1);
vcpu              526 arch/s390/kvm/priv.c 	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
vcpu              534 arch/s390/kvm/priv.c static int handle_tsch(struct kvm_vcpu *vcpu)
vcpu              539 arch/s390/kvm/priv.c 	vcpu->stat.instruction_tsch++;
vcpu              542 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[1])
vcpu              543 arch/s390/kvm/priv.c 		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
vcpu              544 arch/s390/kvm/priv.c 					   vcpu->run->s.regs.gprs[1]);
vcpu              554 arch/s390/kvm/priv.c 	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
vcpu              555 arch/s390/kvm/priv.c 	vcpu->run->s390_tsch.dequeued = !!inti;
vcpu              557 arch/s390/kvm/priv.c 		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
vcpu              558 arch/s390/kvm/priv.c 		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
vcpu              559 arch/s390/kvm/priv.c 		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
vcpu              560 arch/s390/kvm/priv.c 		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
vcpu              562 arch/s390/kvm/priv.c 	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
vcpu              567 arch/s390/kvm/priv.c static int handle_io_inst(struct kvm_vcpu *vcpu)
vcpu              569 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
vcpu              571 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              572 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              574 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.css_support) {
vcpu              579 arch/s390/kvm/priv.c 		if (vcpu->arch.sie_block->ipa == 0xb236)
vcpu              580 arch/s390/kvm/priv.c 			return handle_tpi(vcpu);
vcpu              581 arch/s390/kvm/priv.c 		if (vcpu->arch.sie_block->ipa == 0xb235)
vcpu              582 arch/s390/kvm/priv.c 			return handle_tsch(vcpu);
vcpu              584 arch/s390/kvm/priv.c 		vcpu->stat.instruction_io_other++;
vcpu              591 arch/s390/kvm/priv.c 		kvm_s390_set_psw_cc(vcpu, 3);
vcpu              611 arch/s390/kvm/priv.c static int handle_pqap(struct kvm_vcpu *vcpu)
vcpu              622 arch/s390/kvm/priv.c 	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
vcpu              632 arch/s390/kvm/priv.c 	reg0 = vcpu->run->s.regs.gprs[0];
vcpu              638 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              639 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              644 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              646 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
vcpu              647 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              649 arch/s390/kvm/priv.c 	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
vcpu              650 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              654 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 65))
vcpu              655 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              661 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.crypto.pqap_hook) {
vcpu              662 arch/s390/kvm/priv.c 		if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
vcpu              664 arch/s390/kvm/priv.c 		ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
vcpu              665 arch/s390/kvm/priv.c 		module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
vcpu              666 arch/s390/kvm/priv.c 		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
vcpu              667 arch/s390/kvm/priv.c 			kvm_s390_set_psw_cc(vcpu, 3);
vcpu              676 arch/s390/kvm/priv.c 	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
vcpu              677 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 3);
vcpu              681 arch/s390/kvm/priv.c static int handle_stfl(struct kvm_vcpu *vcpu)
vcpu              686 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stfl++;
vcpu              688 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              689 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              695 arch/s390/kvm/priv.c 	fac = *vcpu->kvm->arch.model.fac_list >> 32;
vcpu              696 arch/s390/kvm/priv.c 	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
vcpu              700 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
vcpu              701 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_stfl(vcpu, fac);
vcpu              727 arch/s390/kvm/priv.c int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
vcpu              729 arch/s390/kvm/priv.c 	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
vcpu              735 arch/s390/kvm/priv.c 	vcpu->stat.instruction_lpsw++;
vcpu              738 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              740 arch/s390/kvm/priv.c 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              742 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              744 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
vcpu              746 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              748 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              753 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              757 arch/s390/kvm/priv.c static int handle_lpswe(struct kvm_vcpu *vcpu)
vcpu              764 arch/s390/kvm/priv.c 	vcpu->stat.instruction_lpswe++;
vcpu              766 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              767 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              769 arch/s390/kvm/priv.c 	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              771 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              772 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
vcpu              774 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              775 arch/s390/kvm/priv.c 	vcpu->arch.sie_block->gpsw = new_psw;
vcpu              776 arch/s390/kvm/priv.c 	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
vcpu              777 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              781 arch/s390/kvm/priv.c static int handle_stidp(struct kvm_vcpu *vcpu)
vcpu              783 arch/s390/kvm/priv.c 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
vcpu              788 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stidp++;
vcpu              790 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              791 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              793 arch/s390/kvm/priv.c 	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              796 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              798 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
vcpu              800 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              802 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
vcpu              806 arch/s390/kvm/priv.c static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
vcpu              811 arch/s390/kvm/priv.c 	cpus = atomic_read(&vcpu->kvm->online_vcpus);
vcpu              833 arch/s390/kvm/priv.c static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
vcpu              836 arch/s390/kvm/priv.c 	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
vcpu              837 arch/s390/kvm/priv.c 	vcpu->run->s390_stsi.addr = addr;
vcpu              838 arch/s390/kvm/priv.c 	vcpu->run->s390_stsi.ar = ar;
vcpu              839 arch/s390/kvm/priv.c 	vcpu->run->s390_stsi.fc = fc;
vcpu              840 arch/s390/kvm/priv.c 	vcpu->run->s390_stsi.sel1 = sel1;
vcpu              841 arch/s390/kvm/priv.c 	vcpu->run->s390_stsi.sel2 = sel2;
vcpu              844 arch/s390/kvm/priv.c static int handle_stsi(struct kvm_vcpu *vcpu)
vcpu              846 arch/s390/kvm/priv.c 	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
vcpu              847 arch/s390/kvm/priv.c 	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
vcpu              848 arch/s390/kvm/priv.c 	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
vcpu              854 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stsi++;
vcpu              855 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
vcpu              857 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              858 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              861 arch/s390/kvm/priv.c 		kvm_s390_set_psw_cc(vcpu, 3);
vcpu              865 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
vcpu              866 arch/s390/kvm/priv.c 	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
vcpu              867 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              870 arch/s390/kvm/priv.c 		vcpu->run->s.regs.gprs[0] = 3 << 28;
vcpu              871 arch/s390/kvm/priv.c 		kvm_s390_set_psw_cc(vcpu, 0);
vcpu              875 arch/s390/kvm/priv.c 	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
vcpu              878 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu              895 arch/s390/kvm/priv.c 		handle_stsi_3_2_2(vcpu, (void *) mem);
vcpu              899 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
vcpu              901 arch/s390/kvm/priv.c 		rc = kvm_s390_inject_prog_cond(vcpu, rc);
vcpu              904 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.user_stsi) {
vcpu              905 arch/s390/kvm/priv.c 		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
vcpu              908 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
vcpu              910 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 0);
vcpu              911 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[0] = 0;
vcpu              914 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 3);
vcpu              920 arch/s390/kvm/priv.c int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
vcpu              922 arch/s390/kvm/priv.c 	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
vcpu              924 arch/s390/kvm/priv.c 		return handle_stidp(vcpu);
vcpu              926 arch/s390/kvm/priv.c 		return handle_set_clock(vcpu);
vcpu              928 arch/s390/kvm/priv.c 		return handle_set_prefix(vcpu);
vcpu              930 arch/s390/kvm/priv.c 		return handle_store_prefix(vcpu);
vcpu              932 arch/s390/kvm/priv.c 		return handle_store_cpu_address(vcpu);
vcpu              934 arch/s390/kvm/priv.c 		return kvm_s390_handle_vsie(vcpu);
vcpu              937 arch/s390/kvm/priv.c 		return handle_ipte_interlock(vcpu);
vcpu              939 arch/s390/kvm/priv.c 		return handle_iske(vcpu);
vcpu              941 arch/s390/kvm/priv.c 		return handle_rrbe(vcpu);
vcpu              943 arch/s390/kvm/priv.c 		return handle_sske(vcpu);
vcpu              945 arch/s390/kvm/priv.c 		return handle_test_block(vcpu);
vcpu              962 arch/s390/kvm/priv.c 		return handle_io_inst(vcpu);
vcpu              964 arch/s390/kvm/priv.c 		return handle_sthyi(vcpu);
vcpu              966 arch/s390/kvm/priv.c 		return handle_stsi(vcpu);
vcpu              968 arch/s390/kvm/priv.c 		return handle_pqap(vcpu);
vcpu              970 arch/s390/kvm/priv.c 		return handle_stfl(vcpu);
vcpu              972 arch/s390/kvm/priv.c 		return handle_lpswe(vcpu);
vcpu              978 arch/s390/kvm/priv.c static int handle_epsw(struct kvm_vcpu *vcpu)
vcpu              982 arch/s390/kvm/priv.c 	vcpu->stat.instruction_epsw++;
vcpu              984 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu              987 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
vcpu              988 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
vcpu              990 arch/s390/kvm/priv.c 		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
vcpu              991 arch/s390/kvm/priv.c 		vcpu->run->s.regs.gprs[reg2] |=
vcpu              992 arch/s390/kvm/priv.c 			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
vcpu             1007 arch/s390/kvm/priv.c static int handle_pfmf(struct kvm_vcpu *vcpu)
vcpu             1014 arch/s390/kvm/priv.c 	vcpu->stat.instruction_pfmf++;
vcpu             1016 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
vcpu             1018 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 8))
vcpu             1019 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu             1021 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1022 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1024 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
vcpu             1025 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1028 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
vcpu             1029 arch/s390/kvm/priv.c 	    !test_kvm_facility(vcpu->kvm, 14))
vcpu             1030 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1033 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
vcpu             1034 arch/s390/kvm/priv.c 	    test_kvm_facility(vcpu->kvm, 10)) {
vcpu             1035 arch/s390/kvm/priv.c 		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
vcpu             1036 arch/s390/kvm/priv.c 		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
vcpu             1039 arch/s390/kvm/priv.c 	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
vcpu             1040 arch/s390/kvm/priv.c 	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
vcpu             1041 arch/s390/kvm/priv.c 	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
vcpu             1042 arch/s390/kvm/priv.c 	start = kvm_s390_logical_to_effective(vcpu, start);
vcpu             1044 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
vcpu             1045 arch/s390/kvm/priv.c 		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
vcpu             1046 arch/s390/kvm/priv.c 			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
vcpu             1049 arch/s390/kvm/priv.c 	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
vcpu             1052 arch/s390/kvm/priv.c 		start = kvm_s390_real_to_abs(vcpu, start);
vcpu             1061 arch/s390/kvm/priv.c 		if (!test_kvm_facility(vcpu->kvm, 78) ||
vcpu             1062 arch/s390/kvm/priv.c 		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
vcpu             1063 arch/s390/kvm/priv.c 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1067 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1075 arch/s390/kvm/priv.c 		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
vcpu             1077 arch/s390/kvm/priv.c 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu             1079 arch/s390/kvm/priv.c 		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
vcpu             1080 arch/s390/kvm/priv.c 			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
vcpu             1081 arch/s390/kvm/priv.c 				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu             1084 arch/s390/kvm/priv.c 		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
vcpu             1085 arch/s390/kvm/priv.c 			int rc = kvm_s390_skey_check_enable(vcpu);
vcpu             1099 arch/s390/kvm/priv.c 				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu             1107 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
vcpu             1108 arch/s390/kvm/priv.c 		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
vcpu             1109 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg2] = end;
vcpu             1111 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
vcpu             1112 arch/s390/kvm/priv.c 			end = kvm_s390_logical_to_effective(vcpu, end);
vcpu             1113 arch/s390/kvm/priv.c 			vcpu->run->s.regs.gprs[reg2] |= end;
vcpu             1122 arch/s390/kvm/priv.c static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
vcpu             1133 arch/s390/kvm/priv.c 	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
vcpu             1134 arch/s390/kvm/priv.c 	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
vcpu             1135 arch/s390/kvm/priv.c 	hva = gfn_to_hva(vcpu->kvm, gfn);
vcpu             1136 arch/s390/kvm/priv.c 	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
vcpu             1139 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu             1141 arch/s390/kvm/priv.c 	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
vcpu             1144 arch/s390/kvm/priv.c 		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
vcpu             1161 arch/s390/kvm/priv.c 	vcpu->run->s.regs.gprs[r1] = res;
vcpu             1169 arch/s390/kvm/priv.c 		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
vcpu             1174 arch/s390/kvm/priv.c 		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
vcpu             1178 arch/s390/kvm/priv.c 			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
vcpu             1184 arch/s390/kvm/priv.c static int handle_essa(struct kvm_vcpu *vcpu)
vcpu             1187 arch/s390/kvm/priv.c 	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
vcpu             1192 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
vcpu             1193 arch/s390/kvm/priv.c 	gmap = vcpu->arch.gmap;
vcpu             1194 arch/s390/kvm/priv.c 	vcpu->stat.instruction_essa++;
vcpu             1195 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.use_cmma)
vcpu             1196 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
vcpu             1198 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1199 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1201 arch/s390/kvm/priv.c 	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
vcpu             1203 arch/s390/kvm/priv.c 	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
vcpu             1205 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1207 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.migration_mode) {
vcpu             1217 arch/s390/kvm/priv.c 		if (vcpu->kvm->mm->context.uses_cmm == 0) {
vcpu             1218 arch/s390/kvm/priv.c 			down_write(&vcpu->kvm->mm->mmap_sem);
vcpu             1219 arch/s390/kvm/priv.c 			vcpu->kvm->mm->context.uses_cmm = 1;
vcpu             1220 arch/s390/kvm/priv.c 			up_write(&vcpu->kvm->mm->mmap_sem);
vcpu             1231 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
vcpu             1233 arch/s390/kvm/priv.c 		kvm_s390_retry_instr(vcpu);
vcpu             1237 arch/s390/kvm/priv.c 		down_read(&vcpu->kvm->mm->mmap_sem);
vcpu             1238 arch/s390/kvm/priv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1239 arch/s390/kvm/priv.c 		i = __do_essa(vcpu, orc);
vcpu             1240 arch/s390/kvm/priv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
vcpu             1241 arch/s390/kvm/priv.c 		up_read(&vcpu->kvm->mm->mmap_sem);
vcpu             1247 arch/s390/kvm/priv.c 	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
vcpu             1248 arch/s390/kvm/priv.c 	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
vcpu             1256 arch/s390/kvm/priv.c int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
vcpu             1258 arch/s390/kvm/priv.c 	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
vcpu             1262 arch/s390/kvm/priv.c 		return handle_ipte_interlock(vcpu);
vcpu             1264 arch/s390/kvm/priv.c 		return handle_epsw(vcpu);
vcpu             1266 arch/s390/kvm/priv.c 		return handle_essa(vcpu);
vcpu             1268 arch/s390/kvm/priv.c 		return handle_pfmf(vcpu);
vcpu             1274 arch/s390/kvm/priv.c int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
vcpu             1276 arch/s390/kvm/priv.c 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
vcpu             1277 arch/s390/kvm/priv.c 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu             1283 arch/s390/kvm/priv.c 	vcpu->stat.instruction_lctl++;
vcpu             1285 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1286 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1288 arch/s390/kvm/priv.c 	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
vcpu             1291 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1293 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
vcpu             1294 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
vcpu             1297 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
vcpu             1299 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu             1303 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
vcpu             1304 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
vcpu             1309 arch/s390/kvm/priv.c 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             1313 arch/s390/kvm/priv.c int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
vcpu             1315 arch/s390/kvm/priv.c 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
vcpu             1316 arch/s390/kvm/priv.c 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu             1322 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stctl++;
vcpu             1324 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1325 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1327 arch/s390/kvm/priv.c 	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
vcpu             1330 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1332 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
vcpu             1333 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
vcpu             1338 arch/s390/kvm/priv.c 		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
vcpu             1343 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
vcpu             1344 arch/s390/kvm/priv.c 	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
vcpu             1347 arch/s390/kvm/priv.c static int handle_lctlg(struct kvm_vcpu *vcpu)
vcpu             1349 arch/s390/kvm/priv.c 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
vcpu             1350 arch/s390/kvm/priv.c 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu             1356 arch/s390/kvm/priv.c 	vcpu->stat.instruction_lctlg++;
vcpu             1358 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1359 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1361 arch/s390/kvm/priv.c 	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
vcpu             1364 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1366 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
vcpu             1367 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
vcpu             1370 arch/s390/kvm/priv.c 	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
vcpu             1372 arch/s390/kvm/priv.c 		return kvm_s390_inject_prog_cond(vcpu, rc);
vcpu             1376 arch/s390/kvm/priv.c 		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
vcpu             1381 arch/s390/kvm/priv.c 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             1385 arch/s390/kvm/priv.c static int handle_stctg(struct kvm_vcpu *vcpu)
vcpu             1387 arch/s390/kvm/priv.c 	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
vcpu             1388 arch/s390/kvm/priv.c 	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu             1394 arch/s390/kvm/priv.c 	vcpu->stat.instruction_stctg++;
vcpu             1396 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1397 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1399 arch/s390/kvm/priv.c 	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
vcpu             1402 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1404 arch/s390/kvm/priv.c 	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
vcpu             1405 arch/s390/kvm/priv.c 	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
vcpu             1410 arch/s390/kvm/priv.c 		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
vcpu             1415 arch/s390/kvm/priv.c 	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
vcpu             1416 arch/s390/kvm/priv.c 	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
vcpu             1419 arch/s390/kvm/priv.c int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
vcpu             1421 arch/s390/kvm/priv.c 	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
vcpu             1423 arch/s390/kvm/priv.c 		return handle_stctg(vcpu);
vcpu             1425 arch/s390/kvm/priv.c 		return handle_lctlg(vcpu);
vcpu             1429 arch/s390/kvm/priv.c 		return handle_ri(vcpu);
vcpu             1435 arch/s390/kvm/priv.c static int handle_tprot(struct kvm_vcpu *vcpu)
vcpu             1443 arch/s390/kvm/priv.c 	vcpu->stat.instruction_tprot++;
vcpu             1445 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1446 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1448 arch/s390/kvm/priv.c 	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);
vcpu             1455 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
vcpu             1456 arch/s390/kvm/priv.c 		ipte_lock(vcpu);
vcpu             1457 arch/s390/kvm/priv.c 	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
vcpu             1461 arch/s390/kvm/priv.c 		ret = guest_translate_address(vcpu, address1, ar, &gpa,
vcpu             1466 arch/s390/kvm/priv.c 			ret = kvm_s390_inject_program_int(vcpu, ret);
vcpu             1469 arch/s390/kvm/priv.c 			kvm_s390_set_psw_cc(vcpu, 3);
vcpu             1475 arch/s390/kvm/priv.c 	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
vcpu             1477 arch/s390/kvm/priv.c 		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu             1481 arch/s390/kvm/priv.c 		kvm_s390_set_psw_cc(vcpu, cc);
vcpu             1485 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
vcpu             1486 arch/s390/kvm/priv.c 		ipte_unlock(vcpu);
vcpu             1490 arch/s390/kvm/priv.c int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
vcpu             1492 arch/s390/kvm/priv.c 	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
vcpu             1494 arch/s390/kvm/priv.c 		return handle_tprot(vcpu);
vcpu             1500 arch/s390/kvm/priv.c static int handle_sckpf(struct kvm_vcpu *vcpu)
vcpu             1504 arch/s390/kvm/priv.c 	vcpu->stat.instruction_sckpf++;
vcpu             1506 arch/s390/kvm/priv.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1507 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1509 arch/s390/kvm/priv.c 	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
vcpu             1510 arch/s390/kvm/priv.c 		return kvm_s390_inject_program_int(vcpu,
vcpu             1513 arch/s390/kvm/priv.c 	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
vcpu             1514 arch/s390/kvm/priv.c 	vcpu->arch.sie_block->todpr = value;
vcpu             1519 arch/s390/kvm/priv.c static int handle_ptff(struct kvm_vcpu *vcpu)
vcpu             1521 arch/s390/kvm/priv.c 	vcpu->stat.instruction_ptff++;
vcpu             1524 arch/s390/kvm/priv.c 	kvm_s390_set_psw_cc(vcpu, 3);
vcpu             1528 arch/s390/kvm/priv.c int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
vcpu             1530 arch/s390/kvm/priv.c 	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
vcpu             1532 arch/s390/kvm/priv.c 		return handle_ptff(vcpu);
vcpu             1534 arch/s390/kvm/priv.c 		return handle_sckpf(vcpu);
vcpu               20 arch/s390/kvm/sigp.c static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
vcpu               39 arch/s390/kvm/sigp.c 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
vcpu               44 arch/s390/kvm/sigp.c static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
vcpu               49 arch/s390/kvm/sigp.c 		.u.emerg.code = vcpu->vcpu_id,
vcpu               55 arch/s390/kvm/sigp.c 		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
vcpu               61 arch/s390/kvm/sigp.c static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
vcpu               63 arch/s390/kvm/sigp.c 	return __inject_sigp_emergency(vcpu, dst_vcpu);
vcpu               66 arch/s390/kvm/sigp.c static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
vcpu               75 arch/s390/kvm/sigp.c 	idle = is_vcpu_idle(vcpu);
vcpu               81 arch/s390/kvm/sigp.c 	if (!is_vcpu_stopped(vcpu)
vcpu               85 arch/s390/kvm/sigp.c 		return __inject_sigp_emergency(vcpu, dst_vcpu);
vcpu               93 arch/s390/kvm/sigp.c static int __sigp_external_call(struct kvm_vcpu *vcpu,
vcpu               98 arch/s390/kvm/sigp.c 		.u.extcall.code = vcpu->vcpu_id,
vcpu              108 arch/s390/kvm/sigp.c 		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
vcpu              115 arch/s390/kvm/sigp.c static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
vcpu              126 arch/s390/kvm/sigp.c 		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
vcpu              132 arch/s390/kvm/sigp.c static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
vcpu              145 arch/s390/kvm/sigp.c 		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
vcpu              151 arch/s390/kvm/sigp.c static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter,
vcpu              158 arch/s390/kvm/sigp.c 	kvm_for_each_vcpu(i, v, vcpu->kvm) {
vcpu              159 arch/s390/kvm/sigp.c 		if (v == vcpu)
vcpu              173 arch/s390/kvm/sigp.c static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
vcpu              187 arch/s390/kvm/sigp.c 	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
vcpu              203 arch/s390/kvm/sigp.c static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
vcpu              225 arch/s390/kvm/sigp.c static int __sigp_sense_running(struct kvm_vcpu *vcpu,
vcpu              230 arch/s390/kvm/sigp.c 	if (!test_kvm_facility(vcpu->kvm, 9)) {
vcpu              246 arch/s390/kvm/sigp.c 	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
vcpu              252 arch/s390/kvm/sigp.c static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
vcpu              268 arch/s390/kvm/sigp.c static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
vcpu              275 arch/s390/kvm/sigp.c static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
vcpu              282 arch/s390/kvm/sigp.c static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
vcpu              286 arch/s390/kvm/sigp.c 	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
vcpu              293 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_sense++;
vcpu              294 arch/s390/kvm/sigp.c 		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
vcpu              297 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_external_call++;
vcpu              298 arch/s390/kvm/sigp.c 		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
vcpu              301 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_emergency++;
vcpu              302 arch/s390/kvm/sigp.c 		rc = __sigp_emergency(vcpu, dst_vcpu);
vcpu              305 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_stop++;
vcpu              306 arch/s390/kvm/sigp.c 		rc = __sigp_stop(vcpu, dst_vcpu);
vcpu              309 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_stop_store_status++;
vcpu              310 arch/s390/kvm/sigp.c 		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
vcpu              313 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_store_status++;
vcpu              314 arch/s390/kvm/sigp.c 		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
vcpu              318 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_prefix++;
vcpu              319 arch/s390/kvm/sigp.c 		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
vcpu              322 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_cond_emergency++;
vcpu              323 arch/s390/kvm/sigp.c 		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
vcpu              327 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_sense_running++;
vcpu              328 arch/s390/kvm/sigp.c 		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
vcpu              331 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_start++;
vcpu              332 arch/s390/kvm/sigp.c 		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
vcpu              335 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_restart++;
vcpu              336 arch/s390/kvm/sigp.c 		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
vcpu              339 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_init_cpu_reset++;
vcpu              340 arch/s390/kvm/sigp.c 		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
vcpu              343 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_cpu_reset++;
vcpu              344 arch/s390/kvm/sigp.c 		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
vcpu              347 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_unknown++;
vcpu              348 arch/s390/kvm/sigp.c 		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
vcpu              352 arch/s390/kvm/sigp.c 		VCPU_EVENT(vcpu, 4,
vcpu              359 arch/s390/kvm/sigp.c static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
vcpu              362 arch/s390/kvm/sigp.c 	if (!vcpu->kvm->arch.user_sigp)
vcpu              374 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_stop++;
vcpu              377 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_stop_store_status++;
vcpu              380 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_store_status++;
vcpu              383 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_store_adtl_status++;
vcpu              386 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_prefix++;
vcpu              389 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_start++;
vcpu              392 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_restart++;
vcpu              395 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_init_cpu_reset++;
vcpu              398 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_cpu_reset++;
vcpu              401 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_unknown++;
vcpu              403 arch/s390/kvm/sigp.c 	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
vcpu              409 arch/s390/kvm/sigp.c int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
vcpu              411 arch/s390/kvm/sigp.c 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
vcpu              412 arch/s390/kvm/sigp.c 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu              414 arch/s390/kvm/sigp.c 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
vcpu              419 arch/s390/kvm/sigp.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu              420 arch/s390/kvm/sigp.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu              422 arch/s390/kvm/sigp.c 	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
vcpu              423 arch/s390/kvm/sigp.c 	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
vcpu              427 arch/s390/kvm/sigp.c 		parameter = vcpu->run->s.regs.gprs[r1];
vcpu              429 arch/s390/kvm/sigp.c 		parameter = vcpu->run->s.regs.gprs[r1 + 1];
vcpu              431 arch/s390/kvm/sigp.c 	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
vcpu              434 arch/s390/kvm/sigp.c 		vcpu->stat.instruction_sigp_arch++;
vcpu              435 arch/s390/kvm/sigp.c 		rc = __sigp_set_arch(vcpu, parameter,
vcpu              436 arch/s390/kvm/sigp.c 				     &vcpu->run->s.regs.gprs[r1]);
vcpu              439 arch/s390/kvm/sigp.c 		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
vcpu              441 arch/s390/kvm/sigp.c 				     &vcpu->run->s.regs.gprs[r1]);
vcpu              447 arch/s390/kvm/sigp.c 	kvm_s390_set_psw_cc(vcpu, rc);
vcpu              460 arch/s390/kvm/sigp.c int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
vcpu              462 arch/s390/kvm/sigp.c 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
vcpu              463 arch/s390/kvm/sigp.c 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
vcpu              465 arch/s390/kvm/sigp.c 	u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
vcpu              467 arch/s390/kvm/sigp.c 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
vcpu              470 arch/s390/kvm/sigp.c 		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
vcpu              474 arch/s390/kvm/sigp.c 		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
vcpu               43 arch/s390/kvm/trace-s390.h 	    TP_PROTO(unsigned int id, struct kvm_vcpu *vcpu,
vcpu               45 arch/s390/kvm/trace-s390.h 	    TP_ARGS(id, vcpu, sie_block),
vcpu               49 arch/s390/kvm/trace-s390.h 		    __field(struct kvm_vcpu *, vcpu)
vcpu               55 arch/s390/kvm/trace-s390.h 		    __entry->vcpu = vcpu;
vcpu               60 arch/s390/kvm/trace-s390.h 		      __entry->id, __entry->vcpu, __entry->sie_block)
vcpu               20 arch/s390/kvm/trace.h #define VCPU_PROTO_COMMON struct kvm_vcpu *vcpu
vcpu               21 arch/s390/kvm/trace.h #define VCPU_ARGS_COMMON vcpu
vcpu               26 arch/s390/kvm/trace.h 	__entry->id = vcpu->vcpu_id;					\
vcpu               27 arch/s390/kvm/trace.h 	__entry->pswmask = vcpu->arch.sie_block->gpsw.mask;		\
vcpu               28 arch/s390/kvm/trace.h 	__entry->pswaddr = vcpu->arch.sie_block->gpsw.addr;		\
vcpu              102 arch/s390/kvm/vsie.c static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              119 arch/s390/kvm/vsie.c 	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
vcpu              121 arch/s390/kvm/vsie.c 	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
vcpu              126 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
vcpu              128 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
vcpu              130 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
vcpu              132 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
vcpu              139 arch/s390/kvm/vsie.c static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
vcpu              144 arch/s390/kvm/vsie.c 	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
vcpu              164 arch/s390/kvm/vsie.c static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
vcpu              167 arch/s390/kvm/vsie.c 	if (read_guest_real(vcpu, apcb_o, apcb_s,
vcpu              185 arch/s390/kvm/vsie.c static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
vcpu              189 arch/s390/kvm/vsie.c 	if (read_guest_real(vcpu, apcb_o, apcb_s,
vcpu              212 arch/s390/kvm/vsie.c static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
vcpu              227 arch/s390/kvm/vsie.c 		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
vcpu              233 arch/s390/kvm/vsie.c 			return setup_apcb10(vcpu, &crycb_s->apcb1,
vcpu              237 arch/s390/kvm/vsie.c 			return setup_apcb00(vcpu,
vcpu              249 arch/s390/kvm/vsie.c 			return setup_apcb10(vcpu, &crycb_s->apcb1,
vcpu              254 arch/s390/kvm/vsie.c 			return setup_apcb00(vcpu,
vcpu              283 arch/s390/kvm/vsie.c static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              294 arch/s390/kvm/vsie.c 	int key_msk = test_kvm_facility(vcpu->kvm, 76);
vcpu              296 arch/s390/kvm/vsie.c 	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
vcpu              301 arch/s390/kvm/vsie.c 	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
vcpu              315 arch/s390/kvm/vsie.c 		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
vcpu              316 arch/s390/kvm/vsie.c 				 vcpu->kvm->arch.crypto.crycb,
vcpu              324 arch/s390/kvm/vsie.c 	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
vcpu              326 arch/s390/kvm/vsie.c 	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
vcpu              331 arch/s390/kvm/vsie.c 	if (read_guest_real(vcpu, crycb_addr + 72,
vcpu              341 arch/s390/kvm/vsie.c 			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
vcpu              358 arch/s390/kvm/vsie.c static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              369 arch/s390/kvm/vsie.c 	if (vcpu->kvm->arch.model.ibc && new_ibc) {
vcpu              375 arch/s390/kvm/vsie.c 		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
vcpu              376 arch/s390/kvm/vsie.c 			scb_s->ibc = vcpu->kvm->arch.model.ibc;
vcpu              381 arch/s390/kvm/vsie.c static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              406 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82)) {
vcpu              437 arch/s390/kvm/vsie.c static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              459 arch/s390/kvm/vsie.c 	rc = prepare_cpuflags(vcpu, vsie_page);
vcpu              508 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
vcpu              511 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
vcpu              518 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82))
vcpu              521 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu              526 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 64))
vcpu              529 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 130))
vcpu              532 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
vcpu              536 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
vcpu              538 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
vcpu              540 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
vcpu              543 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 139))
vcpu              547 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 156))
vcpu              552 arch/s390/kvm/vsie.c 	prepare_ibc(vcpu, vsie_page);
vcpu              553 arch/s390/kvm/vsie.c 	rc = shadow_crycb(vcpu, vsie_page);
vcpu              556 arch/s390/kvm/vsie.c 		unshadow_scb(vcpu, vsie_page);
vcpu              606 arch/s390/kvm/vsie.c static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              621 arch/s390/kvm/vsie.c 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
vcpu              623 arch/s390/kvm/vsie.c 		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
vcpu              663 arch/s390/kvm/vsie.c static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              670 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
vcpu              678 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
vcpu              685 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
vcpu              692 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
vcpu              699 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
vcpu              719 arch/s390/kvm/vsie.c static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              728 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
vcpu              733 arch/s390/kvm/vsie.c 		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
vcpu              739 arch/s390/kvm/vsie.c 			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              757 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              776 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              792 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              822 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              832 arch/s390/kvm/vsie.c 	unpin_blocks(vcpu, vsie_page);
vcpu              837 arch/s390/kvm/vsie.c static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
vcpu              843 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, gpa, hpa);
vcpu              853 arch/s390/kvm/vsie.c static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
vcpu              859 arch/s390/kvm/vsie.c 	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
vcpu              861 arch/s390/kvm/vsie.c 		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
vcpu              875 arch/s390/kvm/vsie.c static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
vcpu              894 arch/s390/kvm/vsie.c 	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
vcpu              905 arch/s390/kvm/vsie.c static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              911 arch/s390/kvm/vsie.c 		return inject_fault(vcpu, PGM_PROTECTION,
vcpu              914 arch/s390/kvm/vsie.c 	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
vcpu              917 arch/s390/kvm/vsie.c 		rc = inject_fault(vcpu, rc,
vcpu              932 arch/s390/kvm/vsie.c static void handle_last_fault(struct kvm_vcpu *vcpu,
vcpu              936 arch/s390/kvm/vsie.c 		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
vcpu              969 arch/s390/kvm/vsie.c static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              974 arch/s390/kvm/vsie.c 	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
vcpu              976 arch/s390/kvm/vsie.c 		if (read_guest_real(vcpu, fac, &vsie_page->fac,
vcpu              992 arch/s390/kvm/vsie.c static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu              993 arch/s390/kvm/vsie.c 	__releases(vcpu->kvm->srcu)
vcpu              994 arch/s390/kvm/vsie.c 	__acquires(vcpu->kvm->srcu)
vcpu             1001 arch/s390/kvm/vsie.c 	handle_last_fault(vcpu, vsie_page);
vcpu             1008 arch/s390/kvm/vsie.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             1019 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82) &&
vcpu             1020 arch/s390/kvm/vsie.c 	    vcpu->arch.sie_block->fpf & FPF_BPBC)
vcpu             1033 arch/s390/kvm/vsie.c 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
vcpu             1035 arch/s390/kvm/vsie.c 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
vcpu             1036 arch/s390/kvm/vsie.c 		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
vcpu             1038 arch/s390/kvm/vsie.c 	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
vcpu             1048 arch/s390/kvm/vsie.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1051 arch/s390/kvm/vsie.c 		VCPU_EVENT(vcpu, 3, "%s", "machine check");
vcpu             1052 arch/s390/kvm/vsie.c 		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
vcpu             1059 arch/s390/kvm/vsie.c 		return handle_fault(vcpu, vsie_page);
vcpu             1064 arch/s390/kvm/vsie.c 			rc = handle_stfle(vcpu, vsie_page);
vcpu             1087 arch/s390/kvm/vsie.c static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
vcpu             1095 arch/s390/kvm/vsie.c 	asce = vcpu->arch.sie_block->gcr[1];
vcpu             1096 arch/s390/kvm/vsie.c 	cr0.val = vcpu->arch.sie_block->gcr[0];
vcpu             1097 arch/s390/kvm/vsie.c 	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
vcpu             1098 arch/s390/kvm/vsie.c 	edat += edat && test_kvm_facility(vcpu->kvm, 78);
vcpu             1110 arch/s390/kvm/vsie.c 	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
vcpu             1113 arch/s390/kvm/vsie.c 	gmap->private = vcpu->kvm;
vcpu             1121 arch/s390/kvm/vsie.c static void register_shadow_scb(struct kvm_vcpu *vcpu,
vcpu             1126 arch/s390/kvm/vsie.c 	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
vcpu             1131 arch/s390/kvm/vsie.c 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
vcpu             1137 arch/s390/kvm/vsie.c 	scb_s->epoch += vcpu->kvm->arch.epoch;
vcpu             1140 arch/s390/kvm/vsie.c 		scb_s->epdx += vcpu->kvm->arch.epdx;
vcpu             1141 arch/s390/kvm/vsie.c 		if (scb_s->epoch < vcpu->kvm->arch.epoch)
vcpu             1151 arch/s390/kvm/vsie.c static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
vcpu             1153 arch/s390/kvm/vsie.c 	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
vcpu             1154 arch/s390/kvm/vsie.c 	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
vcpu             1165 arch/s390/kvm/vsie.c static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu             1171 arch/s390/kvm/vsie.c 		rc = acquire_gmap_shadow(vcpu, vsie_page);
vcpu             1173 arch/s390/kvm/vsie.c 			rc = map_prefix(vcpu, vsie_page);
vcpu             1177 arch/s390/kvm/vsie.c 			rc = do_vsie_run(vcpu, vsie_page);
vcpu             1178 arch/s390/kvm/vsie.c 			gmap_enable(vcpu->arch.gmap);
vcpu             1185 arch/s390/kvm/vsie.c 		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
vcpu             1186 arch/s390/kvm/vsie.c 		    kvm_s390_vcpu_sie_inhibited(vcpu))
vcpu             1285 arch/s390/kvm/vsie.c int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
vcpu             1291 arch/s390/kvm/vsie.c 	vcpu->stat.instruction_sie++;
vcpu             1292 arch/s390/kvm/vsie.c 	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
vcpu             1294 arch/s390/kvm/vsie.c 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
vcpu             1295 arch/s390/kvm/vsie.c 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
vcpu             1298 arch/s390/kvm/vsie.c 	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
vcpu             1302 arch/s390/kvm/vsie.c 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
vcpu             1304 arch/s390/kvm/vsie.c 	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
vcpu             1305 arch/s390/kvm/vsie.c 	    kvm_s390_vcpu_sie_inhibited(vcpu))
vcpu             1308 arch/s390/kvm/vsie.c 	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
vcpu             1315 arch/s390/kvm/vsie.c 	rc = pin_scb(vcpu, vsie_page, scb_addr);
vcpu             1318 arch/s390/kvm/vsie.c 	rc = shadow_scb(vcpu, vsie_page);
vcpu             1321 arch/s390/kvm/vsie.c 	rc = pin_blocks(vcpu, vsie_page);
vcpu             1324 arch/s390/kvm/vsie.c 	register_shadow_scb(vcpu, vsie_page);
vcpu             1325 arch/s390/kvm/vsie.c 	rc = vsie_run(vcpu, vsie_page);
vcpu             1326 arch/s390/kvm/vsie.c 	unregister_shadow_scb(vcpu);
vcpu             1327 arch/s390/kvm/vsie.c 	unpin_blocks(vcpu, vsie_page);
vcpu             1329 arch/s390/kvm/vsie.c 	unshadow_scb(vcpu, vsie_page);
vcpu             1331 arch/s390/kvm/vsie.c 	unpin_scb(vcpu, vsie_page, scb_addr);
vcpu             1333 arch/s390/kvm/vsie.c 	put_vsie_page(vcpu->kvm, vsie_page);
vcpu             1366 arch/s390/kvm/vsie.c void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
vcpu             1368 arch/s390/kvm/vsie.c 	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);
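
The vsie.c entries above end with kvm_s390_handle_vsie (lines 1285-1333 of that file), where resources are acquired in order (pin_scb, shadow_scb, pin_blocks, register_shadow_scb) and then torn down in reverse order after vsie_run. A minimal, standalone sketch of that goto-based acquire/unwind ladder follows; it is not kernel code, and the resource names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the pinned scb/blocks; not KVM code. */
static int pin_a(void **a)   { *a = malloc(16); return *a ? 0 : -1; }
static int pin_b(void **b)   { *b = malloc(16); return *b ? 0 : -1; }
static void unpin_a(void *a) { free(a); }
static void unpin_b(void *b) { free(b); }
static int run(void *a, void *b) { (void)a; (void)b; return 0; }

/* Acquire in order, release in reverse order; a failure part-way through
 * unwinds only what was already acquired, mirroring the pin/unpin ordering
 * visible in the kvm_s390_handle_vsie entries above. */
static int handle(void)
{
	void *a = NULL, *b = NULL;
	int rc;

	rc = pin_a(&a);
	if (rc)
		goto out;
	rc = pin_b(&b);
	if (rc)
		goto out_unpin_a;

	rc = run(a, b);

	unpin_b(b);
out_unpin_a:
	unpin_a(a);
out:
	return rc;
}

int main(void)
{
	printf("rc=%d\n", handle());
	return 0;
}
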
vcpu              141 arch/x86/hyperv/hv_apic.c 	int cur_cpu, vcpu;
vcpu              173 arch/x86/hyperv/hv_apic.c 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
vcpu              174 arch/x86/hyperv/hv_apic.c 		if (vcpu == VP_INVAL)
vcpu              181 arch/x86/hyperv/hv_apic.c 		if (vcpu >= 64)
vcpu              184 arch/x86/hyperv/hv_apic.c 		__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
vcpu               59 arch/x86/hyperv/hv_spinlock.c __visible bool hv_vcpu_is_preempted(int vcpu)
vcpu               58 arch/x86/hyperv/mmu.c 	int cpu, vcpu, gva_n, max_gvas;
vcpu              116 arch/x86/hyperv/mmu.c 			vcpu = hv_cpu_number_to_vp_number(cpu);
vcpu              117 arch/x86/hyperv/mmu.c 			if (vcpu == VP_INVAL) {
vcpu              122 arch/x86/hyperv/mmu.c 			if (vcpu >= 64)
vcpu              125 arch/x86/hyperv/mmu.c 			__set_bit(vcpu, (unsigned long *)
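
The hv_apic.c and mmu.c entries above share one pattern: translate each Linux CPU to a Hyper-V VP number, set the matching bit in a 64-bit mask, and fall back to a slower path when the VP number is unknown or does not fit below 64. A standalone sketch of that mask-building loop; cpu_to_vp() and VP_INVALID are stand-ins, not the kernel's names.

#include <stdint.h>
#include <stdio.h>

#define VP_INVALID (-1)                 /* stand-in for the kernel's VP_INVAL */

/* Hypothetical identity mapping; the real lookup is per-CPU data. */
static int cpu_to_vp(int cpu) { return cpu; }

/* Returns 0 and fills *mask, or -1 if the caller must use a wider
 * representation (VP unknown or >= 64). */
static int build_vp_mask(const int *cpus, int n, uint64_t *mask)
{
	*mask = 0;
	for (int i = 0; i < n; i++) {
		int vp = cpu_to_vp(cpus[i]);

		if (vp == VP_INVALID || vp >= 64)
			return -1;
		*mask |= 1ULL << vp;
	}
	return 0;
}

int main(void)
{
	int cpus[] = { 0, 3, 17 };
	uint64_t mask;

	if (!build_vp_mask(cpus, 3, &mask))
		printf("mask=%#llx\n", (unsigned long long)mask);
	return 0;
}
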
vcpu              380 arch/x86/include/asm/kvm_host.h 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
vcpu              381 arch/x86/include/asm/kvm_host.h 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
vcpu              382 arch/x86/include/asm/kvm_host.h 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
vcpu              383 arch/x86/include/asm/kvm_host.h 	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
vcpu              385 arch/x86/include/asm/kvm_host.h 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
vcpu              387 arch/x86/include/asm/kvm_host.h 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
vcpu              389 arch/x86/include/asm/kvm_host.h 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
vcpu              391 arch/x86/include/asm/kvm_host.h 	int (*sync_page)(struct kvm_vcpu *vcpu,
vcpu              393 arch/x86/include/asm/kvm_host.h 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
vcpu              394 arch/x86/include/asm/kvm_host.h 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu              456 arch/x86/include/asm/kvm_host.h 	struct kvm_vcpu *vcpu;
vcpu              660 arch/x86/include/asm/kvm_host.h 	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
vcpu             1018 arch/x86/include/asm/kvm_host.h 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
vcpu             1027 arch/x86/include/asm/kvm_host.h 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
vcpu             1028 arch/x86/include/asm/kvm_host.h 	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
vcpu             1030 arch/x86/include/asm/kvm_host.h 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
vcpu             1031 arch/x86/include/asm/kvm_host.h 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
vcpu             1032 arch/x86/include/asm/kvm_host.h 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
vcpu             1034 arch/x86/include/asm/kvm_host.h 	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
vcpu             1035 arch/x86/include/asm/kvm_host.h 	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
vcpu             1036 arch/x86/include/asm/kvm_host.h 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
vcpu             1037 arch/x86/include/asm/kvm_host.h 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
vcpu             1038 arch/x86/include/asm/kvm_host.h 	void (*get_segment)(struct kvm_vcpu *vcpu,
vcpu             1040 arch/x86/include/asm/kvm_host.h 	int (*get_cpl)(struct kvm_vcpu *vcpu);
vcpu             1041 arch/x86/include/asm/kvm_host.h 	void (*set_segment)(struct kvm_vcpu *vcpu,
vcpu             1043 arch/x86/include/asm/kvm_host.h 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
vcpu             1044 arch/x86/include/asm/kvm_host.h 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
vcpu             1045 arch/x86/include/asm/kvm_host.h 	void (*decache_cr3)(struct kvm_vcpu *vcpu);
vcpu             1046 arch/x86/include/asm/kvm_host.h 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
vcpu             1047 arch/x86/include/asm/kvm_host.h 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
vcpu             1048 arch/x86/include/asm/kvm_host.h 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
vcpu             1049 arch/x86/include/asm/kvm_host.h 	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
vcpu             1050 arch/x86/include/asm/kvm_host.h 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
vcpu             1051 arch/x86/include/asm/kvm_host.h 	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
vcpu             1052 arch/x86/include/asm/kvm_host.h 	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
vcpu             1053 arch/x86/include/asm/kvm_host.h 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
vcpu             1054 arch/x86/include/asm/kvm_host.h 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
vcpu             1055 arch/x86/include/asm/kvm_host.h 	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
vcpu             1056 arch/x86/include/asm/kvm_host.h 	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
vcpu             1057 arch/x86/include/asm/kvm_host.h 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
vcpu             1058 arch/x86/include/asm/kvm_host.h 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
vcpu             1059 arch/x86/include/asm/kvm_host.h 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
vcpu             1060 arch/x86/include/asm/kvm_host.h 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
vcpu             1061 arch/x86/include/asm/kvm_host.h 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
vcpu             1063 arch/x86/include/asm/kvm_host.h 	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
vcpu             1074 arch/x86/include/asm/kvm_host.h 	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
vcpu             1076 arch/x86/include/asm/kvm_host.h 	void (*run)(struct kvm_vcpu *vcpu);
vcpu             1077 arch/x86/include/asm/kvm_host.h 	int (*handle_exit)(struct kvm_vcpu *vcpu);
vcpu             1078 arch/x86/include/asm/kvm_host.h 	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
vcpu             1079 arch/x86/include/asm/kvm_host.h 	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
vcpu             1080 arch/x86/include/asm/kvm_host.h 	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
vcpu             1081 arch/x86/include/asm/kvm_host.h 	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
vcpu             1083 arch/x86/include/asm/kvm_host.h 	void (*set_irq)(struct kvm_vcpu *vcpu);
vcpu             1084 arch/x86/include/asm/kvm_host.h 	void (*set_nmi)(struct kvm_vcpu *vcpu);
vcpu             1085 arch/x86/include/asm/kvm_host.h 	void (*queue_exception)(struct kvm_vcpu *vcpu);
vcpu             1086 arch/x86/include/asm/kvm_host.h 	void (*cancel_injection)(struct kvm_vcpu *vcpu);
vcpu             1087 arch/x86/include/asm/kvm_host.h 	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
vcpu             1088 arch/x86/include/asm/kvm_host.h 	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
vcpu             1089 arch/x86/include/asm/kvm_host.h 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
vcpu             1090 arch/x86/include/asm/kvm_host.h 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
vcpu             1091 arch/x86/include/asm/kvm_host.h 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
vcpu             1092 arch/x86/include/asm/kvm_host.h 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
vcpu             1093 arch/x86/include/asm/kvm_host.h 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
vcpu             1094 arch/x86/include/asm/kvm_host.h 	bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
vcpu             1095 arch/x86/include/asm/kvm_host.h 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
vcpu             1096 arch/x86/include/asm/kvm_host.h 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
vcpu             1097 arch/x86/include/asm/kvm_host.h 	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
vcpu             1098 arch/x86/include/asm/kvm_host.h 	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
vcpu             1099 arch/x86/include/asm/kvm_host.h 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
vcpu             1100 arch/x86/include/asm/kvm_host.h 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
vcpu             1101 arch/x86/include/asm/kvm_host.h 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
vcpu             1102 arch/x86/include/asm/kvm_host.h 	int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
vcpu             1103 arch/x86/include/asm/kvm_host.h 	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
vcpu             1106 arch/x86/include/asm/kvm_host.h 	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
vcpu             1107 arch/x86/include/asm/kvm_host.h 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
vcpu             1112 arch/x86/include/asm/kvm_host.h 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
vcpu             1118 arch/x86/include/asm/kvm_host.h 	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
vcpu             1120 arch/x86/include/asm/kvm_host.h 	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
vcpu             1122 arch/x86/include/asm/kvm_host.h 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
vcpu             1124 arch/x86/include/asm/kvm_host.h 	int (*check_intercept)(struct kvm_vcpu *vcpu,
vcpu             1127 arch/x86/include/asm/kvm_host.h 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
vcpu             1134 arch/x86/include/asm/kvm_host.h 	int (*check_nested_events)(struct kvm_vcpu *vcpu);
vcpu             1135 arch/x86/include/asm/kvm_host.h 	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
vcpu             1163 arch/x86/include/asm/kvm_host.h 	int (*write_log_dirty)(struct kvm_vcpu *vcpu);
vcpu             1177 arch/x86/include/asm/kvm_host.h 	int (*pre_block)(struct kvm_vcpu *vcpu);
vcpu             1178 arch/x86/include/asm/kvm_host.h 	void (*post_block)(struct kvm_vcpu *vcpu);
vcpu             1180 arch/x86/include/asm/kvm_host.h 	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
vcpu             1181 arch/x86/include/asm/kvm_host.h 	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
vcpu             1185 arch/x86/include/asm/kvm_host.h 	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
vcpu             1186 arch/x86/include/asm/kvm_host.h 	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
vcpu             1188 arch/x86/include/asm/kvm_host.h 	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
vcpu             1190 arch/x86/include/asm/kvm_host.h 	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
vcpu             1192 arch/x86/include/asm/kvm_host.h 	void (*setup_mce)(struct kvm_vcpu *vcpu);
vcpu             1194 arch/x86/include/asm/kvm_host.h 	int (*get_nested_state)(struct kvm_vcpu *vcpu,
vcpu             1197 arch/x86/include/asm/kvm_host.h 	int (*set_nested_state)(struct kvm_vcpu *vcpu,
vcpu             1200 arch/x86/include/asm/kvm_host.h 	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
vcpu             1202 arch/x86/include/asm/kvm_host.h 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
vcpu             1203 arch/x86/include/asm/kvm_host.h 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
vcpu             1204 arch/x86/include/asm/kvm_host.h 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
vcpu             1205 arch/x86/include/asm/kvm_host.h 	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
vcpu             1213 arch/x86/include/asm/kvm_host.h 	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
vcpu             1215 arch/x86/include/asm/kvm_host.h 	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
vcpu             1217 arch/x86/include/asm/kvm_host.h 	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
vcpu             1219 arch/x86/include/asm/kvm_host.h 	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
vcpu             1220 arch/x86/include/asm/kvm_host.h 	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
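
The kvm_host.h entries from line 1018 to line 1220 enumerate members of the per-vendor callback table (struct kvm_x86_ops in this tree): each hardware-specific operation is a function pointer taking the vcpu, and common code dispatches through the table, as the kvm_x86_ops->cpuid_update(vcpu) and kvm_x86_ops->get_cpl(vcpu) entries further down show. A minimal standalone sketch of that ops-table pattern; the type and member names below are invented for illustration.

#include <stdio.h>

struct vcpu { int cpl; };

/* Tiny stand-in for a vendor callback table; only the shape mirrors the
 * kvm_host.h listing above, the names are hypothetical. */
struct backend_ops {
	int  (*get_cpl)(struct vcpu *vcpu);
	void (*run)(struct vcpu *vcpu);
};

static int  demo_get_cpl(struct vcpu *vcpu) { return vcpu->cpl; }
static void demo_run(struct vcpu *vcpu)     { (void)vcpu; puts("enter guest"); }

static const struct backend_ops demo_ops = {
	.get_cpl = demo_get_cpl,
	.run     = demo_run,
};

/* Common code only ever sees the table pointer, never the backend. */
static const struct backend_ops *ops = &demo_ops;

int main(void)
{
	struct vcpu v = { .cpl = 0 };

	ops->run(&v);
	printf("cpl=%d\n", ops->get_cpl(&v));
	return 0;
}
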
vcpu             1257 arch/x86/include/asm/kvm_host.h void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
vcpu             1258 arch/x86/include/asm/kvm_host.h int kvm_mmu_create(struct kvm_vcpu *vcpu);
vcpu             1265 arch/x86/include/asm/kvm_host.h void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
vcpu             1284 arch/x86/include/asm/kvm_host.h int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
vcpu             1285 arch/x86/include/asm/kvm_host.h bool pdptrs_changed(struct kvm_vcpu *vcpu);
vcpu             1287 arch/x86/include/asm/kvm_host.h int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             1305 arch/x86/include/asm/kvm_host.h u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
vcpu             1356 arch/x86/include/asm/kvm_host.h int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
vcpu             1357 arch/x86/include/asm/kvm_host.h int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
vcpu             1361 arch/x86/include/asm/kvm_host.h bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
vcpu             1362 arch/x86/include/asm/kvm_host.h int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
vcpu             1363 arch/x86/include/asm/kvm_host.h int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
vcpu             1364 arch/x86/include/asm/kvm_host.h int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
vcpu             1365 arch/x86/include/asm/kvm_host.h int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
vcpu             1369 arch/x86/include/asm/kvm_host.h int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
vcpu             1370 arch/x86/include/asm/kvm_host.h int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
vcpu             1371 arch/x86/include/asm/kvm_host.h int kvm_emulate_halt(struct kvm_vcpu *vcpu);
vcpu             1372 arch/x86/include/asm/kvm_host.h int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
vcpu             1373 arch/x86/include/asm/kvm_host.h int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
vcpu             1375 arch/x86/include/asm/kvm_host.h void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
vcpu             1376 arch/x86/include/asm/kvm_host.h int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
vcpu             1377 arch/x86/include/asm/kvm_host.h void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
vcpu             1379 arch/x86/include/asm/kvm_host.h int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
vcpu             1382 arch/x86/include/asm/kvm_host.h int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
vcpu             1383 arch/x86/include/asm/kvm_host.h int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
vcpu             1384 arch/x86/include/asm/kvm_host.h int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
vcpu             1385 arch/x86/include/asm/kvm_host.h int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
vcpu             1386 arch/x86/include/asm/kvm_host.h int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
vcpu             1387 arch/x86/include/asm/kvm_host.h int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
vcpu             1388 arch/x86/include/asm/kvm_host.h unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
vcpu             1389 arch/x86/include/asm/kvm_host.h void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
vcpu             1390 arch/x86/include/asm/kvm_host.h void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
vcpu             1391 arch/x86/include/asm/kvm_host.h int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
vcpu             1393 arch/x86/include/asm/kvm_host.h int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
vcpu             1394 arch/x86/include/asm/kvm_host.h int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
vcpu             1396 arch/x86/include/asm/kvm_host.h unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
vcpu             1397 arch/x86/include/asm/kvm_host.h void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
vcpu             1398 arch/x86/include/asm/kvm_host.h bool kvm_rdpmc(struct kvm_vcpu *vcpu);
vcpu             1400 arch/x86/include/asm/kvm_host.h void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
vcpu             1401 arch/x86/include/asm/kvm_host.h void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
vcpu             1402 arch/x86/include/asm/kvm_host.h void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
vcpu             1403 arch/x86/include/asm/kvm_host.h void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
vcpu             1404 arch/x86/include/asm/kvm_host.h void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
vcpu             1405 arch/x86/include/asm/kvm_host.h int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu             1408 arch/x86/include/asm/kvm_host.h bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
vcpu             1409 arch/x86/include/asm/kvm_host.h bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
vcpu             1430 arch/x86/include/asm/kvm_host.h void kvm_inject_nmi(struct kvm_vcpu *vcpu);
vcpu             1433 arch/x86/include/asm/kvm_host.h int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
vcpu             1434 arch/x86/include/asm/kvm_host.h void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
vcpu             1435 arch/x86/include/asm/kvm_host.h int kvm_mmu_load(struct kvm_vcpu *vcpu);
vcpu             1436 arch/x86/include/asm/kvm_host.h void kvm_mmu_unload(struct kvm_vcpu *vcpu);
vcpu             1437 arch/x86/include/asm/kvm_host.h void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
vcpu             1438 arch/x86/include/asm/kvm_host.h void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu             1440 arch/x86/include/asm/kvm_host.h gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
vcpu             1442 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             1444 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             1446 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             1448 arch/x86/include/asm/kvm_host.h gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             1451 arch/x86/include/asm/kvm_host.h void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
vcpu             1453 arch/x86/include/asm/kvm_host.h int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
vcpu             1455 arch/x86/include/asm/kvm_host.h int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
vcpu             1457 arch/x86/include/asm/kvm_host.h void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
vcpu             1458 arch/x86/include/asm/kvm_host.h void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
vcpu             1459 arch/x86/include/asm/kvm_host.h void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
vcpu             1464 arch/x86/include/asm/kvm_host.h static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
vcpu             1504 arch/x86/include/asm/kvm_host.h static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
vcpu             1506 arch/x86/include/asm/kvm_host.h 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
vcpu             1535 arch/x86/include/asm/kvm_host.h #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
vcpu             1561 arch/x86/include/asm/kvm_host.h int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
vcpu             1562 arch/x86/include/asm/kvm_host.h int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
vcpu             1564 arch/x86/include/asm/kvm_host.h void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
vcpu             1565 arch/x86/include/asm/kvm_host.h void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
vcpu             1574 arch/x86/include/asm/kvm_host.h u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
vcpu             1575 arch/x86/include/asm/kvm_host.h u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
vcpu             1577 arch/x86/include/asm/kvm_host.h unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
vcpu             1578 arch/x86/include/asm/kvm_host.h bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
vcpu             1583 arch/x86/include/asm/kvm_host.h void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
vcpu             1585 arch/x86/include/asm/kvm_host.h void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu             1587 arch/x86/include/asm/kvm_host.h void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
vcpu             1589 arch/x86/include/asm/kvm_host.h bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
vcpu             1590 arch/x86/include/asm/kvm_host.h extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu             1592 arch/x86/include/asm/kvm_host.h int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
vcpu             1593 arch/x86/include/asm/kvm_host.h int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
vcpu             1594 arch/x86/include/asm/kvm_host.h void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
vcpu             1600 arch/x86/include/asm/kvm_host.h bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
vcpu             1601 arch/x86/include/asm/kvm_host.h bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
vcpu             1616 arch/x86/include/asm/kvm_host.h static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
vcpu             1619 arch/x86/include/asm/kvm_host.h 		kvm_x86_ops->vcpu_blocking(vcpu);
vcpu             1622 arch/x86/include/asm/kvm_host.h static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
vcpu             1625 arch/x86/include/asm/kvm_host.h 		kvm_x86_ops->vcpu_unblocking(vcpu);
vcpu             1628 arch/x86/include/asm/kvm_host.h static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
vcpu               35 arch/x86/include/asm/kvm_page_track.h 	void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
vcpu               63 arch/x86/include/asm/kvm_page_track.h bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu               72 arch/x86/include/asm/kvm_page_track.h void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
vcpu              237 arch/x86/include/asm/mshyperv.h bool hv_vcpu_is_preempted(int vcpu);
vcpu               14 arch/x86/include/asm/pvclock.h 			    struct pvclock_vcpu_time_info *vcpu,
vcpu               67 arch/x86/kvm/cpuid.c int kvm_update_cpuid(struct kvm_vcpu *vcpu)
vcpu               70 arch/x86/kvm/cpuid.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu               72 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
vcpu               79 arch/x86/kvm/cpuid.c 		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
vcpu               84 arch/x86/kvm/cpuid.c 	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
vcpu               94 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
vcpu               99 arch/x86/kvm/cpuid.c 			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
vcpu              104 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
vcpu              106 arch/x86/kvm/cpuid.c 		vcpu->arch.guest_supported_xcr0 = 0;
vcpu              107 arch/x86/kvm/cpuid.c 		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
vcpu              109 arch/x86/kvm/cpuid.c 		vcpu->arch.guest_supported_xcr0 =
vcpu              112 arch/x86/kvm/cpuid.c 		vcpu->arch.guest_xstate_size = best->ebx =
vcpu              113 arch/x86/kvm/cpuid.c 			xstate_required_size(vcpu->arch.xcr0, false);
vcpu              116 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
vcpu              118 arch/x86/kvm/cpuid.c 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
vcpu              124 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
vcpu              132 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
vcpu              133 arch/x86/kvm/cpuid.c 	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
vcpu              137 arch/x86/kvm/cpuid.c 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
vcpu              138 arch/x86/kvm/cpuid.c 		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
vcpu              140 arch/x86/kvm/cpuid.c 			if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
vcpu              148 arch/x86/kvm/cpuid.c 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu              149 arch/x86/kvm/cpuid.c 	kvm_mmu_reset_context(vcpu);
vcpu              151 arch/x86/kvm/cpuid.c 	kvm_pmu_refresh(vcpu);
vcpu              163 arch/x86/kvm/cpuid.c static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
vcpu              169 arch/x86/kvm/cpuid.c 	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
vcpu              170 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
vcpu              182 arch/x86/kvm/cpuid.c int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
vcpu              186 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
vcpu              189 arch/x86/kvm/cpuid.c 	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
vcpu              198 arch/x86/kvm/cpuid.c int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu              221 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
vcpu              222 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
vcpu              223 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
vcpu              224 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
vcpu              225 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
vcpu              226 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].index = 0;
vcpu              227 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].flags = 0;
vcpu              228 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[0] = 0;
vcpu              229 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[1] = 0;
vcpu              230 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[2] = 0;
vcpu              232 arch/x86/kvm/cpuid.c 	vcpu->arch.cpuid_nent = cpuid->nent;
vcpu              233 arch/x86/kvm/cpuid.c 	cpuid_fix_nx_cap(vcpu);
vcpu              234 arch/x86/kvm/cpuid.c 	kvm_apic_set_version(vcpu);
vcpu              235 arch/x86/kvm/cpuid.c 	kvm_x86_ops->cpuid_update(vcpu);
vcpu              236 arch/x86/kvm/cpuid.c 	r = kvm_update_cpuid(vcpu);
vcpu              243 arch/x86/kvm/cpuid.c int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
vcpu              253 arch/x86/kvm/cpuid.c 	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
vcpu              256 arch/x86/kvm/cpuid.c 	vcpu->arch.cpuid_nent = cpuid->nent;
vcpu              257 arch/x86/kvm/cpuid.c 	kvm_apic_set_version(vcpu);
vcpu              258 arch/x86/kvm/cpuid.c 	kvm_x86_ops->cpuid_update(vcpu);
vcpu              259 arch/x86/kvm/cpuid.c 	r = kvm_update_cpuid(vcpu);
vcpu              264 arch/x86/kvm/cpuid.c int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
vcpu              271 arch/x86/kvm/cpuid.c 	if (cpuid->nent < vcpu->arch.cpuid_nent)
vcpu              274 arch/x86/kvm/cpuid.c 	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
vcpu              275 arch/x86/kvm/cpuid.c 			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
vcpu              280 arch/x86/kvm/cpuid.c 	cpuid->nent = vcpu->arch.cpuid_nent;
vcpu              928 arch/x86/kvm/cpuid.c static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
vcpu              930 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
vcpu              933 arch/x86/kvm/cpuid.c 	int nent = vcpu->arch.cpuid_nent;
vcpu              939 arch/x86/kvm/cpuid.c 		ej = &vcpu->arch.cpuid_entries[j];
vcpu              962 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
vcpu              968 arch/x86/kvm/cpuid.c 	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
vcpu              971 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
vcpu              974 arch/x86/kvm/cpuid.c 				move_to_next_stateful_cpuid_entry(vcpu, i);
vcpu              988 arch/x86/kvm/cpuid.c static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
vcpu              992 arch/x86/kvm/cpuid.c 	max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
vcpu              996 arch/x86/kvm/cpuid.c bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
vcpu             1004 arch/x86/kvm/cpuid.c 	entry = kvm_find_cpuid_entry(vcpu, function, index);
vcpu             1012 arch/x86/kvm/cpuid.c 	if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
vcpu             1013 arch/x86/kvm/cpuid.c 	    !cpuid_function_in_range(vcpu, function)) {
vcpu             1014 arch/x86/kvm/cpuid.c 		max = kvm_find_cpuid_entry(vcpu, 0, 0);
vcpu             1017 arch/x86/kvm/cpuid.c 			entry = kvm_find_cpuid_entry(vcpu, function, index);
vcpu             1035 arch/x86/kvm/cpuid.c 			entry = kvm_find_cpuid_entry(vcpu, function, 1);
vcpu             1047 arch/x86/kvm/cpuid.c int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
vcpu             1051 arch/x86/kvm/cpuid.c 	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
vcpu             1054 arch/x86/kvm/cpuid.c 	eax = kvm_rax_read(vcpu);
vcpu             1055 arch/x86/kvm/cpuid.c 	ecx = kvm_rcx_read(vcpu);
vcpu             1056 arch/x86/kvm/cpuid.c 	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
vcpu             1057 arch/x86/kvm/cpuid.c 	kvm_rax_write(vcpu, eax);
vcpu             1058 arch/x86/kvm/cpuid.c 	kvm_rbx_write(vcpu, ebx);
vcpu             1059 arch/x86/kvm/cpuid.c 	kvm_rcx_write(vcpu, ecx);
vcpu             1060 arch/x86/kvm/cpuid.c 	kvm_rdx_write(vcpu, edx);
vcpu             1061 arch/x86/kvm/cpuid.c 	return kvm_skip_emulated_instruction(vcpu);
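
The cpuid.c entries above show CPUID being emulated from a per-vcpu cache: the set_cpuid ioctls copy an entry array in from userspace, kvm_find_cpuid_entry scans vcpu->arch.cpuid_entries for a matching (function, index) pair, and kvm_emulate_cpuid reads EAX/ECX, writes EAX/EBX/ECX/EDX back and skips the instruction. A self-contained sketch of that table-lookup style of emulation; the structure below is hypothetical, not the kernel's kvm_cpuid_entry2.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cached entry, loosely modelled on the listing above. */
struct cpuid_entry {
	uint32_t function, index;
	uint32_t eax, ebx, ecx, edx;
};

static const struct cpuid_entry table[] = {
	/* leaf 0: max basic leaf in EAX, "GenuineIntel" in EBX/EDX/ECX */
	{ 0x0, 0, 0xd, 0x756e6547, 0x6c65746e, 0x49656e69 },
	{ 0x1, 0, 0x000306a9, 0, 0, 0 },
};

static const struct cpuid_entry *find_entry(uint32_t fn, uint32_t idx)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].function == fn && table[i].index == idx)
			return &table[i];
	return NULL;   /* KVM also handles stateful and out-of-range leaves */
}

int main(void)
{
	const struct cpuid_entry *e = find_entry(0, 0);

	if (e)
		printf("leaf 0: eax=%#x ebx=%#x\n", e->eax, e->ebx);
	return 0;
}
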
vcpu                9 arch/x86/kvm/cpuid.h int kvm_update_cpuid(struct kvm_vcpu *vcpu);
vcpu               11 arch/x86/kvm/cpuid.h struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
vcpu               16 arch/x86/kvm/cpuid.h int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
vcpu               19 arch/x86/kvm/cpuid.h int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
vcpu               22 arch/x86/kvm/cpuid.h int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
vcpu               25 arch/x86/kvm/cpuid.h bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
vcpu               28 arch/x86/kvm/cpuid.h int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
vcpu               30 arch/x86/kvm/cpuid.h static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
vcpu               32 arch/x86/kvm/cpuid.h 	return vcpu->arch.maxphyaddr;
vcpu               68 arch/x86/kvm/cpuid.h static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
vcpu               73 arch/x86/kvm/cpuid.h 	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
vcpu               92 arch/x86/kvm/cpuid.h static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
vcpu              100 arch/x86/kvm/cpuid.h 	reg = guest_cpuid_get_register(vcpu, x86_feature);
vcpu              107 arch/x86/kvm/cpuid.h static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
vcpu              111 arch/x86/kvm/cpuid.h 	reg = guest_cpuid_get_register(vcpu, x86_feature);
vcpu              116 arch/x86/kvm/cpuid.h static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
vcpu              120 arch/x86/kvm/cpuid.h 	best = kvm_find_cpuid_entry(vcpu, 0, 0);
vcpu              124 arch/x86/kvm/cpuid.h static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
vcpu              128 arch/x86/kvm/cpuid.h 	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
vcpu              135 arch/x86/kvm/cpuid.h static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
vcpu              139 arch/x86/kvm/cpuid.h 	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
vcpu              146 arch/x86/kvm/cpuid.h static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
vcpu              150 arch/x86/kvm/cpuid.h 	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
vcpu              157 arch/x86/kvm/cpuid.h static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
vcpu              159 arch/x86/kvm/cpuid.h 	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
vcpu              162 arch/x86/kvm/cpuid.h static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
vcpu              164 arch/x86/kvm/cpuid.h 	return vcpu->arch.msr_misc_features_enables &
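
The cpuid.h helpers above (guest_cpuid_family, guest_cpuid_model, guest_cpuid_stepping) read the cached CPUID leaf 1 entry and decode its EAX word into family, model and stepping. For reference, the standard x86 decoding of that word as a standalone example; the sample EAX value is arbitrary.

#include <stdint.h>
#include <stdio.h>

/* Standard CPUID.1:EAX decoding: the extended family field only applies
 * when the base family is 0xf, the extended model only for family >= 6. */
static unsigned int x86_family(uint32_t eax)
{
	unsigned int fam = (eax >> 8) & 0xf;

	if (fam == 0xf)
		fam += (eax >> 20) & 0xff;
	return fam;
}

static unsigned int x86_model(uint32_t eax)
{
	unsigned int model = (eax >> 4) & 0xf;

	if (x86_family(eax) >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;
	return model;
}

int main(void)
{
	uint32_t eax = 0x000306a9;   /* arbitrary sample signature */

	printf("family=%u model=%#x stepping=%u\n",
	       x86_family(eax), x86_model(eax), (unsigned int)(eax & 0xf));
	return 0;
}
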
vcpu               13 arch/x86/kvm/debugfs.c 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
vcpu               14 arch/x86/kvm/debugfs.c 	*val = vcpu->arch.apic->lapic_timer.timer_advance_ns;
vcpu               22 arch/x86/kvm/debugfs.c 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
vcpu               23 arch/x86/kvm/debugfs.c 	*val = vcpu->arch.tsc_offset;
vcpu               31 arch/x86/kvm/debugfs.c 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
vcpu               32 arch/x86/kvm/debugfs.c 	*val = vcpu->arch.tsc_scaling_ratio;
vcpu               46 arch/x86/kvm/debugfs.c void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
vcpu               48 arch/x86/kvm/debugfs.c 	debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
vcpu               51 arch/x86/kvm/debugfs.c 	if (lapic_in_kernel(vcpu))
vcpu               53 arch/x86/kvm/debugfs.c 				    vcpu->debugfs_dentry, vcpu,
vcpu               58 arch/x86/kvm/debugfs.c 				    vcpu->debugfs_dentry, vcpu,
vcpu               61 arch/x86/kvm/debugfs.c 				    vcpu->debugfs_dentry, vcpu,
vcpu              135 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = NULL;
vcpu              141 arch/x86/kvm/hyperv.c 	vcpu = kvm_get_vcpu(kvm, vpidx);
vcpu              142 arch/x86/kvm/hyperv.c 	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
vcpu              143 arch/x86/kvm/hyperv.c 		return vcpu;
vcpu              144 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              145 arch/x86/kvm/hyperv.c 		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
vcpu              146 arch/x86/kvm/hyperv.c 			return vcpu;
vcpu              152 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu;
vcpu              155 arch/x86/kvm/hyperv.c 	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
vcpu              156 arch/x86/kvm/hyperv.c 	if (!vcpu)
vcpu              158 arch/x86/kvm/hyperv.c 	synic = vcpu_to_synic(vcpu);
vcpu              162 arch/x86/kvm/hyperv.c static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
vcpu              164 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
vcpu              165 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
vcpu              166 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
vcpu              170 arch/x86/kvm/hyperv.c 	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
vcpu              190 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
vcpu              191 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
vcpu              199 arch/x86/kvm/hyperv.c 	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
vcpu              205 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
vcpu              211 arch/x86/kvm/hyperv.c 	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
vcpu              230 arch/x86/kvm/hyperv.c 			if (kvm_clear_guest(vcpu->kvm,
vcpu              242 arch/x86/kvm/hyperv.c 			if (kvm_clear_guest(vcpu->kvm,
vcpu              255 arch/x86/kvm/hyperv.c 			kvm_hv_notify_acked_sint(vcpu, i);
vcpu              305 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
vcpu              323 arch/x86/kvm/hyperv.c 	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
vcpu              324 arch/x86/kvm/hyperv.c 	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
vcpu              339 arch/x86/kvm/hyperv.c void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
vcpu              341 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
vcpu              344 arch/x86/kvm/hyperv.c 	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
vcpu              348 arch/x86/kvm/hyperv.c 			kvm_hv_notify_acked_sint(vcpu, i);
vcpu              378 arch/x86/kvm/hyperv.c 				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
vcpu              399 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu;
vcpu              409 arch/x86/kvm/hyperv.c 	vcpu = kvm_get_vcpu(kvm, 0);
vcpu              410 arch/x86/kvm/hyperv.c 	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
vcpu              418 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
vcpu              421 arch/x86/kvm/hyperv.c 		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
vcpu              422 arch/x86/kvm/hyperv.c 	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
vcpu              424 arch/x86/kvm/hyperv.c 		kvm_vcpu_kick(vcpu);
vcpu              429 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
vcpu              436 arch/x86/kvm/hyperv.c 		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
vcpu              567 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
vcpu              584 arch/x86/kvm/hyperv.c 	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
vcpu              596 arch/x86/kvm/hyperv.c 		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
vcpu              607 arch/x86/kvm/hyperv.c 	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
vcpu              623 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
vcpu              635 arch/x86/kvm/hyperv.c 	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
vcpu              636 arch/x86/kvm/hyperv.c 	return synic_deliver_msg(vcpu_to_synic(vcpu),
vcpu              643 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
vcpu              649 arch/x86/kvm/hyperv.c 	if (lapic_in_kernel(vcpu))
vcpu              650 arch/x86/kvm/hyperv.c 		return !kvm_apic_set_irq(vcpu, &irq, NULL);
vcpu              672 arch/x86/kvm/hyperv.c void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
vcpu              674 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
vcpu              687 arch/x86/kvm/hyperv.c 						get_time_ref_counter(vcpu->kvm);
vcpu              702 arch/x86/kvm/hyperv.c void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              704 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
vcpu              711 arch/x86/kvm/hyperv.c bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
vcpu              713 arch/x86/kvm/hyperv.c 	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
vcpu              715 arch/x86/kvm/hyperv.c 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
vcpu              719 arch/x86/kvm/hyperv.c bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
vcpu              722 arch/x86/kvm/hyperv.c 	if (!kvm_hv_assist_page_enabled(vcpu))
vcpu              724 arch/x86/kvm/hyperv.c 	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
vcpu              753 arch/x86/kvm/hyperv.c void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              755 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
vcpu              765 arch/x86/kvm/hyperv.c void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu              767 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
vcpu              769 arch/x86/kvm/hyperv.c 	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
vcpu              772 arch/x86/kvm/hyperv.c int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
vcpu              774 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
vcpu              780 arch/x86/kvm/hyperv.c 	kvm_vcpu_deactivate_apicv(vcpu);
vcpu              808 arch/x86/kvm/hyperv.c static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
vcpu              811 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
vcpu              821 arch/x86/kvm/hyperv.c static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
vcpu              823 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
vcpu              829 arch/x86/kvm/hyperv.c static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
vcpu              831 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
vcpu              838 arch/x86/kvm/hyperv.c 		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
vcpu              846 arch/x86/kvm/hyperv.c 		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
vcpu              852 arch/x86/kvm/hyperv.c static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
vcpu              855 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
vcpu              994 arch/x86/kvm/hyperv.c static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
vcpu              997 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1023 arch/x86/kvm/hyperv.c 		kvm_x86_ops->patch_hypercall(vcpu, instructions);
vcpu             1034 arch/x86/kvm/hyperv.c 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
vcpu             1037 arch/x86/kvm/hyperv.c 		return kvm_hv_msr_set_crash_data(vcpu,
vcpu             1041 arch/x86/kvm/hyperv.c 		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
vcpu             1044 arch/x86/kvm/hyperv.c 			vcpu_debug(vcpu, "hyper-v reset requested\n");
vcpu             1045 arch/x86/kvm/hyperv.c 			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
vcpu             1063 arch/x86/kvm/hyperv.c 		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
vcpu             1080 arch/x86/kvm/hyperv.c static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
vcpu             1082 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
vcpu             1086 arch/x86/kvm/hyperv.c 		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
vcpu             1087 arch/x86/kvm/hyperv.c 		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
vcpu             1116 arch/x86/kvm/hyperv.c 			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
vcpu             1121 arch/x86/kvm/hyperv.c 		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
vcpu             1133 arch/x86/kvm/hyperv.c 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
vcpu             1134 arch/x86/kvm/hyperv.c 		if (kvm_lapic_enable_pv_eoi(vcpu,
vcpu             1141 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
vcpu             1143 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
vcpu             1145 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
vcpu             1157 arch/x86/kvm/hyperv.c 		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
vcpu             1164 arch/x86/kvm/hyperv.c 		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
vcpu             1173 arch/x86/kvm/hyperv.c 		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
vcpu             1183 arch/x86/kvm/hyperv.c 		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
vcpu             1191 arch/x86/kvm/hyperv.c static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
vcpu             1194 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1211 arch/x86/kvm/hyperv.c 		return kvm_hv_msr_get_crash_data(vcpu,
vcpu             1215 arch/x86/kvm/hyperv.c 		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
vcpu             1229 arch/x86/kvm/hyperv.c 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
vcpu             1237 arch/x86/kvm/hyperv.c static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
vcpu             1241 arch/x86/kvm/hyperv.c 	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
vcpu             1248 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
vcpu             1250 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
vcpu             1252 arch/x86/kvm/hyperv.c 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
vcpu             1265 arch/x86/kvm/hyperv.c 		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
vcpu             1272 arch/x86/kvm/hyperv.c 		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
vcpu             1281 arch/x86/kvm/hyperv.c 		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
vcpu             1285 arch/x86/kvm/hyperv.c 		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
vcpu             1291 arch/x86/kvm/hyperv.c 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
vcpu             1298 arch/x86/kvm/hyperv.c int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
vcpu             1303 arch/x86/kvm/hyperv.c 		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
vcpu             1304 arch/x86/kvm/hyperv.c 		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
vcpu             1305 arch/x86/kvm/hyperv.c 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
vcpu             1308 arch/x86/kvm/hyperv.c 		return kvm_hv_set_msr(vcpu, msr, data, host);
vcpu             1311 arch/x86/kvm/hyperv.c int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
vcpu             1316 arch/x86/kvm/hyperv.c 		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
vcpu             1317 arch/x86/kvm/hyperv.c 		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
vcpu             1318 arch/x86/kvm/hyperv.c 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
vcpu             1321 arch/x86/kvm/hyperv.c 		return kvm_hv_get_msr(vcpu, msr, pdata, host);
vcpu             1329 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu;
vcpu             1344 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1345 arch/x86/kvm/hyperv.c 		if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
vcpu             1443 arch/x86/kvm/hyperv.c 	struct kvm_vcpu *vcpu;
vcpu             1446 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1451 arch/x86/kvm/hyperv.c 		kvm_apic_set_irq(vcpu, &irq, NULL);
vcpu             1534 arch/x86/kvm/hyperv.c static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
vcpu             1538 arch/x86/kvm/hyperv.c 	longmode = is_64_bit_mode(vcpu);
vcpu             1540 arch/x86/kvm/hyperv.c 		kvm_rax_write(vcpu, result);
vcpu             1542 arch/x86/kvm/hyperv.c 		kvm_rdx_write(vcpu, result >> 32);
vcpu             1543 arch/x86/kvm/hyperv.c 		kvm_rax_write(vcpu, result & 0xffffffff);
vcpu             1547 arch/x86/kvm/hyperv.c static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
vcpu             1549 arch/x86/kvm/hyperv.c 	kvm_hv_hypercall_set_result(vcpu, result);
vcpu             1550 arch/x86/kvm/hyperv.c 	++vcpu->stat.hypercalls;
vcpu             1551 arch/x86/kvm/hyperv.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             1554 arch/x86/kvm/hyperv.c static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
vcpu             1556 arch/x86/kvm/hyperv.c 	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
vcpu             1559 arch/x86/kvm/hyperv.c static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
vcpu             1571 arch/x86/kvm/hyperv.c 		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
vcpu             1589 arch/x86/kvm/hyperv.c 	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
vcpu             1598 arch/x86/kvm/hyperv.c int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
vcpu             1608 arch/x86/kvm/hyperv.c 	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
vcpu             1609 arch/x86/kvm/hyperv.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             1614 arch/x86/kvm/hyperv.c 	if (is_64_bit_mode(vcpu)) {
vcpu             1615 arch/x86/kvm/hyperv.c 		param = kvm_rcx_read(vcpu);
vcpu             1616 arch/x86/kvm/hyperv.c 		ingpa = kvm_rdx_read(vcpu);
vcpu             1617 arch/x86/kvm/hyperv.c 		outgpa = kvm_r8_read(vcpu);
vcpu             1621 arch/x86/kvm/hyperv.c 		param = ((u64)kvm_rdx_read(vcpu) << 32) |
vcpu             1622 arch/x86/kvm/hyperv.c 			(kvm_rax_read(vcpu) & 0xffffffff);
vcpu             1623 arch/x86/kvm/hyperv.c 		ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
vcpu             1624 arch/x86/kvm/hyperv.c 			(kvm_rcx_read(vcpu) & 0xffffffff);
vcpu             1625 arch/x86/kvm/hyperv.c 		outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
vcpu             1626 arch/x86/kvm/hyperv.c 			(kvm_rsi_read(vcpu) & 0xffffffff);
vcpu             1643 arch/x86/kvm/hyperv.c 		kvm_vcpu_on_spin(vcpu, true);
vcpu             1650 arch/x86/kvm/hyperv.c 		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
vcpu             1656 arch/x86/kvm/hyperv.c 		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
vcpu             1660 arch/x86/kvm/hyperv.c 		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu             1661 arch/x86/kvm/hyperv.c 		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
vcpu             1662 arch/x86/kvm/hyperv.c 		vcpu->run->hyperv.u.hcall.input = param;
vcpu             1663 arch/x86/kvm/hyperv.c 		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
vcpu             1664 arch/x86/kvm/hyperv.c 		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
vcpu             1665 arch/x86/kvm/hyperv.c 		vcpu->arch.complete_userspace_io =
vcpu             1673 arch/x86/kvm/hyperv.c 		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
vcpu             1680 arch/x86/kvm/hyperv.c 		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
vcpu             1687 arch/x86/kvm/hyperv.c 		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
vcpu             1694 arch/x86/kvm/hyperv.c 		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
vcpu             1701 arch/x86/kvm/hyperv.c 		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
vcpu             1708 arch/x86/kvm/hyperv.c 		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
vcpu             1715 arch/x86/kvm/hyperv.c 	return kvm_hv_hypercall_complete(vcpu, ret);
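
The kvm_hv_hypercall entries above (hyperv.c lines 1598 onward) show the register convention only: in 64-bit mode the control word comes in RCX and the input/output GPAs in RDX and R8, in 32-bit mode each value is split across a register pair, and the result is written back through kvm_hv_hypercall_set_result. The control word is then split into the call code, fast flag and rep counts used in the switch. The sketch below decodes those fields at the bit positions given in the Hyper-V TLFS (code in bits 15:0, fast in bit 16, rep count in bits 43:32, rep start index in bits 59:48); it illustrates the layout and is not the kernel's decoder.

#include <stdint.h>
#include <stdio.h>

struct hv_hcall {
	unsigned int code;
	int fast;
	unsigned int rep_cnt, rep_idx;
};

/* Field positions per the Hyper-V TLFS hypercall input value; treat as an
 * illustration of the layout, not an authoritative definition. */
static struct hv_hcall decode(uint64_t param)
{
	struct hv_hcall c = {
		.code    = param & 0xffff,
		.fast    = !!(param & (1ULL << 16)),
		.rep_cnt = (param >> 32) & 0xfff,
		.rep_idx = (param >> 48) & 0xfff,
	};
	return c;
}

int main(void)
{
	/* made-up control word: call code 0x14, 4 repetitions, start index 0 */
	struct hv_hcall c = decode(0x0000000400000014ULL);

	printf("code=%#x fast=%d rep_cnt=%u rep_idx=%u\n",
	       c.code, c.fast, c.rep_cnt, c.rep_idx);
	return 0;
}
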
vcpu             1786 arch/x86/kvm/hyperv.c int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
vcpu             1802 arch/x86/kvm/hyperv.c 		evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
vcpu             1865 arch/x86/kvm/hyperv.c 			if (lapic_in_kernel(vcpu))
vcpu               26 arch/x86/kvm/hyperv.h static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
vcpu               28 arch/x86/kvm/hyperv.h 	return &vcpu->arch.hyperv;
vcpu               39 arch/x86/kvm/hyperv.h static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
vcpu               41 arch/x86/kvm/hyperv.h 	return &vcpu->arch.hyperv.synic;
vcpu               49 arch/x86/kvm/hyperv.h int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
vcpu               50 arch/x86/kvm/hyperv.h int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
vcpu               53 arch/x86/kvm/hyperv.h int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
vcpu               57 arch/x86/kvm/hyperv.h void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
vcpu               58 arch/x86/kvm/hyperv.h int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
vcpu               60 arch/x86/kvm/hyperv.h void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
vcpu               61 arch/x86/kvm/hyperv.h void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
vcpu               62 arch/x86/kvm/hyperv.h void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
vcpu               64 arch/x86/kvm/hyperv.h bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
vcpu               65 arch/x86/kvm/hyperv.h bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
vcpu               68 arch/x86/kvm/hyperv.h static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
vcpu               71 arch/x86/kvm/hyperv.h 	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
vcpu               83 arch/x86/kvm/hyperv.h static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
vcpu               85 arch/x86/kvm/hyperv.h 	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
vcpu               89 arch/x86/kvm/hyperv.h void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
vcpu               97 arch/x86/kvm/hyperv.h int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
vcpu              218 arch/x86/kvm/i8254.c void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
vcpu              220 arch/x86/kvm/i8254.c 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
vcpu              223 arch/x86/kvm/i8254.c 	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
vcpu              243 arch/x86/kvm/i8254.c 	struct kvm_vcpu *vcpu;
vcpu              263 arch/x86/kvm/i8254.c 		kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              264 arch/x86/kvm/i8254.c 			kvm_apic_nmi_wd_deliver(vcpu);
vcpu              427 arch/x86/kvm/i8254.c static int pit_ioport_write(struct kvm_vcpu *vcpu,
vcpu              503 arch/x86/kvm/i8254.c static int pit_ioport_read(struct kvm_vcpu *vcpu,
vcpu              573 arch/x86/kvm/i8254.c static int speaker_ioport_write(struct kvm_vcpu *vcpu,
vcpu              590 arch/x86/kvm/i8254.c static int speaker_ioport_read(struct kvm_vcpu *vcpu,
vcpu               52 arch/x86/kvm/i8259.c 	struct kvm_vcpu *vcpu;
vcpu               60 arch/x86/kvm/i8259.c 		kvm_for_each_vcpu(i, vcpu, s->kvm) {
vcpu               61 arch/x86/kvm/i8259.c 			if (kvm_apic_accept_pic_intr(vcpu)) {
vcpu               62 arch/x86/kvm/i8259.c 				kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu               63 arch/x86/kvm/i8259.c 				kvm_vcpu_kick(vcpu);
vcpu              274 arch/x86/kvm/i8259.c 	struct kvm_vcpu *vcpu;
vcpu              290 arch/x86/kvm/i8259.c 	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
vcpu              291 arch/x86/kvm/i8259.c 		if (kvm_apic_accept_pic_intr(vcpu)) {
vcpu              516 arch/x86/kvm/i8259.c static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              523 arch/x86/kvm/i8259.c static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              530 arch/x86/kvm/i8259.c static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              537 arch/x86/kvm/i8259.c static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              544 arch/x86/kvm/i8259.c static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              551 arch/x86/kvm/i8259.c static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              105 arch/x86/kvm/ioapic.c static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
vcpu              108 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
vcpu              113 arch/x86/kvm/ioapic.c 	if (!kvm_apic_match_dest(vcpu, NULL, 0,	e->fields.dest_id,
vcpu              117 arch/x86/kvm/ioapic.c 	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
vcpu              118 arch/x86/kvm/ioapic.c 	old_val = test_bit(vcpu->vcpu_id, dest_map->map);
vcpu              124 arch/x86/kvm/ioapic.c 		__set_bit(vcpu->vcpu_id, dest_map->map);
vcpu              125 arch/x86/kvm/ioapic.c 		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
vcpu              128 arch/x86/kvm/ioapic.c 		__clear_bit(vcpu->vcpu_id, dest_map->map);
vcpu              134 arch/x86/kvm/ioapic.c void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
vcpu              136 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
vcpu              139 arch/x86/kvm/ioapic.c 	__rtc_irq_eoi_tracking_restore_one(vcpu);
vcpu              145 arch/x86/kvm/ioapic.c 	struct kvm_vcpu *vcpu;
vcpu              152 arch/x86/kvm/ioapic.c 	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
vcpu              153 arch/x86/kvm/ioapic.c 	    __rtc_irq_eoi_tracking_restore_one(vcpu);
vcpu              156 arch/x86/kvm/ioapic.c static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
vcpu              158 arch/x86/kvm/ioapic.c 	if (test_and_clear_bit(vcpu->vcpu_id,
vcpu              236 arch/x86/kvm/ioapic.c void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
vcpu              238 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
vcpu              246 arch/x86/kvm/ioapic.c 	if (test_bit(vcpu->vcpu_id, dest_map->map))
vcpu              247 arch/x86/kvm/ioapic.c 		__set_bit(dest_map->vectors[vcpu->vcpu_id],
vcpu              255 arch/x86/kvm/ioapic.c 			if (kvm_apic_match_dest(vcpu, NULL, 0,
vcpu              257 arch/x86/kvm/ioapic.c 			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
vcpu              422 arch/x86/kvm/ioapic.c static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
vcpu              426 arch/x86/kvm/ioapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              430 arch/x86/kvm/ioapic.c 	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
vcpu              431 arch/x86/kvm/ioapic.c 	    vector == dest_map->vectors[vcpu->vcpu_id])
vcpu              432 arch/x86/kvm/ioapic.c 		rtc_irq_eoi(ioapic, vcpu);
vcpu              481 arch/x86/kvm/ioapic.c void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
vcpu              483 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
vcpu              486 arch/x86/kvm/ioapic.c 	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
vcpu              501 arch/x86/kvm/ioapic.c static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
vcpu              543 arch/x86/kvm/ioapic.c static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
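The RTC EOI-tracking code above keeps a per-vCPU bitmap (dest_map->map) plus a parallel vector array: a vCPU's bit is set when the RTC interrupt is delivered to it and cleared again when the matching EOI arrives, so the ioapic can tell whether EOIs are still outstanding. A small user-space analog of that pattern, assuming at most 64 vCPUs (eoi_tracker and the tracker_* names are hypothetical, not KVM code):

	#include <stdint.h>

	#define MAX_VCPUS 64

	struct eoi_tracker {
		uint64_t map;                  /* one bit per vCPU id (< 64) */
		uint8_t  vectors[MAX_VCPUS];   /* vector owed by each vCPU */
		int      pending_eoi;          /* EOIs still outstanding */
	};

	/* Record that vCPU "id" was handed "vec" and now owes an EOI. */
	static void tracker_deliver(struct eoi_tracker *t, int id, uint8_t vec)
	{
		if (!(t->map & (UINT64_C(1) << id))) {
			t->map |= UINT64_C(1) << id;
			t->vectors[id] = vec;
			t->pending_eoi++;
		}
	}

	/* Clear the pending bit when the EOI for the same vector arrives. */
	static void tracker_eoi(struct eoi_tracker *t, int id, uint8_t vec)
	{
		if ((t->map & (UINT64_C(1) << id)) && t->vectors[id] == vec) {
			t->map &= ~(UINT64_C(1) << id);
			t->pending_eoi--;
		}
	}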
vcpu              118 arch/x86/kvm/ioapic.h void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
vcpu              119 arch/x86/kvm/ioapic.h bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
vcpu              122 arch/x86/kvm/ioapic.h void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
vcpu              134 arch/x86/kvm/ioapic.h void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
vcpu              136 arch/x86/kvm/ioapic.h void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
vcpu               22 arch/x86/kvm/irq.c int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu               24 arch/x86/kvm/irq.c 	if (lapic_in_kernel(vcpu))
vcpu               25 arch/x86/kvm/irq.c 		return apic_has_pending_timer(vcpu);
vcpu              151 arch/x86/kvm/irq.c void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
vcpu              153 arch/x86/kvm/irq.c 	if (lapic_in_kernel(vcpu))
vcpu              154 arch/x86/kvm/irq.c 		kvm_inject_apic_timer_irqs(vcpu);
vcpu              158 arch/x86/kvm/irq.c void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
vcpu              160 arch/x86/kvm/irq.c 	__kvm_migrate_apic_timer(vcpu);
vcpu              161 arch/x86/kvm/irq.c 	__kvm_migrate_pit_timer(vcpu);
vcpu              105 arch/x86/kvm/irq.h void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
vcpu              106 arch/x86/kvm/irq.h void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
vcpu              107 arch/x86/kvm/irq.h void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
vcpu              108 arch/x86/kvm/irq.h void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
vcpu              109 arch/x86/kvm/irq.h void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
vcpu              110 arch/x86/kvm/irq.h void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
vcpu              112 arch/x86/kvm/irq.h int apic_has_pending_timer(struct kvm_vcpu *vcpu);
vcpu               51 arch/x86/kvm/irq_comm.c 	struct kvm_vcpu *vcpu, *lowest = NULL;
vcpu               66 arch/x86/kvm/irq_comm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu               67 arch/x86/kvm/irq_comm.c 		if (!kvm_apic_present(vcpu))
vcpu               70 arch/x86/kvm/irq_comm.c 		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
vcpu               77 arch/x86/kvm/irq_comm.c 			r += kvm_apic_set_irq(vcpu, irq, dest_map);
vcpu               78 arch/x86/kvm/irq_comm.c 		} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
vcpu               81 arch/x86/kvm/irq_comm.c 					lowest = vcpu;
vcpu               82 arch/x86/kvm/irq_comm.c 				else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
vcpu               83 arch/x86/kvm/irq_comm.c 					lowest = vcpu;
vcpu              157 arch/x86/kvm/irq_comm.c 	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
vcpu              314 arch/x86/kvm/irq_comm.c 		e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
vcpu              328 arch/x86/kvm/irq_comm.c 	struct kvm_vcpu *vcpu;
vcpu              333 arch/x86/kvm/irq_comm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              334 arch/x86/kvm/irq_comm.c 		if (!kvm_apic_present(vcpu))
vcpu              337 arch/x86/kvm/irq_comm.c 		if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
vcpu              344 arch/x86/kvm/irq_comm.c 		*dest_vcpu = vcpu;
vcpu              397 arch/x86/kvm/irq_comm.c void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
vcpu              400 arch/x86/kvm/irq_comm.c 	struct kvm *kvm = vcpu->kvm;
vcpu              417 arch/x86/kvm/irq_comm.c 			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
vcpu              419 arch/x86/kvm/irq_comm.c 			if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
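kvm_irq_delivery_to_apic (the irq_comm.c lines above) walks every vCPU, skips those whose APIC is absent or whose destination does not match, and then either delivers to every match or, for lowest-priority delivery, remembers the single match whose arbitration priority compares lowest. A hedged sketch of that selection loop over a plain array (pick_lowest and the demo_vcpu fields are hypothetical stand-ins for the kvm_apic_* checks):

	struct demo_vcpu {
		int present;   /* APIC present and enabled */
		int match;     /* destination matches this interrupt */
		int prio;      /* arbitration priority; lower wins */
	};

	/* Return the index of the matching vCPU with the lowest priority,
	 * or -1 if nothing matches - the shape of the loop shown above. */
	static int pick_lowest(const struct demo_vcpu *v, int n)
	{
		int lowest = -1;

		for (int i = 0; i < n; i++) {
			if (!v[i].present || !v[i].match)
				continue;
			if (lowest < 0 || v[i].prio < v[lowest].prio)
				lowest = i;
		}
		return lowest;
	}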
vcpu               13 arch/x86/kvm/kvm_cache_regs.h static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
vcpu               15 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
vcpu               17 arch/x86/kvm/kvm_cache_regs.h static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
vcpu               20 arch/x86/kvm/kvm_cache_regs.h 	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
vcpu               40 arch/x86/kvm/kvm_cache_regs.h static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
vcpu               43 arch/x86/kvm/kvm_cache_regs.h 	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
vcpu               44 arch/x86/kvm/kvm_cache_regs.h 		kvm_x86_ops->cache_reg(vcpu, reg);
vcpu               46 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.regs[reg];
vcpu               49 arch/x86/kvm/kvm_cache_regs.h static inline void kvm_register_write(struct kvm_vcpu *vcpu,
vcpu               53 arch/x86/kvm/kvm_cache_regs.h 	vcpu->arch.regs[reg] = val;
vcpu               54 arch/x86/kvm/kvm_cache_regs.h 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
vcpu               55 arch/x86/kvm/kvm_cache_regs.h 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
vcpu               58 arch/x86/kvm/kvm_cache_regs.h static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
vcpu               60 arch/x86/kvm/kvm_cache_regs.h 	return kvm_register_read(vcpu, VCPU_REGS_RIP);
vcpu               63 arch/x86/kvm/kvm_cache_regs.h static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
vcpu               65 arch/x86/kvm/kvm_cache_regs.h 	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
vcpu               68 arch/x86/kvm/kvm_cache_regs.h static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
vcpu               70 arch/x86/kvm/kvm_cache_regs.h 	return kvm_register_read(vcpu, VCPU_REGS_RSP);
vcpu               73 arch/x86/kvm/kvm_cache_regs.h static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
vcpu               75 arch/x86/kvm/kvm_cache_regs.h 	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
vcpu               78 arch/x86/kvm/kvm_cache_regs.h static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
vcpu               83 arch/x86/kvm/kvm_cache_regs.h 		      (unsigned long *)&vcpu->arch.regs_avail))
vcpu               84 arch/x86/kvm/kvm_cache_regs.h 		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);
vcpu               86 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.walk_mmu->pdptrs[index];
vcpu               89 arch/x86/kvm/kvm_cache_regs.h static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
vcpu               92 arch/x86/kvm/kvm_cache_regs.h 	if (tmask & vcpu->arch.cr0_guest_owned_bits)
vcpu               93 arch/x86/kvm/kvm_cache_regs.h 		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
vcpu               94 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.cr0 & mask;
vcpu               97 arch/x86/kvm/kvm_cache_regs.h static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
vcpu               99 arch/x86/kvm/kvm_cache_regs.h 	return kvm_read_cr0_bits(vcpu, ~0UL);
vcpu              102 arch/x86/kvm/kvm_cache_regs.h static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
vcpu              105 arch/x86/kvm/kvm_cache_regs.h 	if (tmask & vcpu->arch.cr4_guest_owned_bits)
vcpu              106 arch/x86/kvm/kvm_cache_regs.h 		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
vcpu              107 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.cr4 & mask;
vcpu              110 arch/x86/kvm/kvm_cache_regs.h static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
vcpu              112 arch/x86/kvm/kvm_cache_regs.h 	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
vcpu              113 arch/x86/kvm/kvm_cache_regs.h 		kvm_x86_ops->decache_cr3(vcpu);
vcpu              114 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.cr3;
vcpu              117 arch/x86/kvm/kvm_cache_regs.h static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
vcpu              119 arch/x86/kvm/kvm_cache_regs.h 	return kvm_read_cr4_bits(vcpu, ~0UL);
vcpu              122 arch/x86/kvm/kvm_cache_regs.h static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
vcpu              124 arch/x86/kvm/kvm_cache_regs.h 	return (kvm_rax_read(vcpu) & -1u)
vcpu              125 arch/x86/kvm/kvm_cache_regs.h 		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
vcpu              128 arch/x86/kvm/kvm_cache_regs.h static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
vcpu              130 arch/x86/kvm/kvm_cache_regs.h 	vcpu->arch.hflags |= HF_GUEST_MASK;
vcpu              133 arch/x86/kvm/kvm_cache_regs.h static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
vcpu              135 arch/x86/kvm/kvm_cache_regs.h 	vcpu->arch.hflags &= ~HF_GUEST_MASK;
vcpu              137 arch/x86/kvm/kvm_cache_regs.h 	if (vcpu->arch.load_eoi_exitmap_pending) {
vcpu              138 arch/x86/kvm/kvm_cache_regs.h 		vcpu->arch.load_eoi_exitmap_pending = false;
vcpu              139 arch/x86/kvm/kvm_cache_regs.h 		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
vcpu              143 arch/x86/kvm/kvm_cache_regs.h static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
vcpu              145 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.hflags & HF_GUEST_MASK;
vcpu              148 arch/x86/kvm/kvm_cache_regs.h static inline bool is_smm(struct kvm_vcpu *vcpu)
vcpu              150 arch/x86/kvm/kvm_cache_regs.h 	return vcpu->arch.hflags & HF_SMM_MASK;
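The kvm_cache_regs.h accessors above implement a small lazy cache: kvm_register_read() only asks the vendor backend (kvm_x86_ops->cache_reg) to fetch a register when its bit in regs_avail is clear, and kvm_register_write() stores the value and sets both regs_avail and regs_dirty so the vendor code can flush it back before re-entering the guest. A stripped-down sketch of the same avail/dirty pattern outside KVM (demo_regs and hw_load are hypothetical names):

	#include <stdint.h>

	#define NR_REGS 16

	struct demo_regs {
		uint64_t val[NR_REGS];
		uint32_t avail;   /* bit set: val[] holds the current value */
		uint32_t dirty;   /* bit set: val[] must be flushed back */
	};

	/* Stand-in for kvm_x86_ops->cache_reg(): pull one register from "hardware". */
	extern uint64_t hw_load(int reg);

	static uint64_t reg_read(struct demo_regs *r, int reg)
	{
		if (!(r->avail & (1u << reg))) {
			r->val[reg] = hw_load(reg);
			r->avail |= 1u << reg;
		}
		return r->val[reg];
	}

	static void reg_write(struct demo_regs *r, int reg, uint64_t v)
	{
		r->val[reg] = v;
		r->avail |= 1u << reg;
		r->dirty |= 1u << reg;  /* flush before the next "VM entry" */
	}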
vcpu               81 arch/x86/kvm/lapic.c bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
vcpu               83 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              116 arch/x86/kvm/lapic.c 	return apic->vcpu->vcpu_id;
vcpu              119 arch/x86/kvm/lapic.c bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
vcpu              121 arch/x86/kvm/lapic.c 	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
vcpu              125 arch/x86/kvm/lapic.c static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
vcpu              127 arch/x86/kvm/lapic.c 	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
vcpu              173 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu;
vcpu              179 arch/x86/kvm/lapic.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              180 arch/x86/kvm/lapic.c 		if (kvm_apic_present(vcpu))
vcpu              181 arch/x86/kvm/lapic.c 			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
vcpu              192 arch/x86/kvm/lapic.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              193 arch/x86/kvm/lapic.c 		struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              200 arch/x86/kvm/lapic.c 		if (!kvm_apic_present(vcpu))
vcpu              263 arch/x86/kvm/lapic.c 		recalculate_apic_map(apic->vcpu->kvm);
vcpu              270 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
vcpu              276 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
vcpu              288 arch/x86/kvm/lapic.c 	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
vcpu              292 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
vcpu              325 arch/x86/kvm/lapic.c void kvm_apic_set_version(struct kvm_vcpu *vcpu)
vcpu              327 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              331 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu))
vcpu              341 arch/x86/kvm/lapic.c 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
vcpu              343 arch/x86/kvm/lapic.c 	    !ioapic_in_kernel(vcpu->kvm))
vcpu              415 arch/x86/kvm/lapic.c bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
vcpu              417 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              447 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu;
vcpu              449 arch/x86/kvm/lapic.c 	vcpu = apic->vcpu;
vcpu              451 arch/x86/kvm/lapic.c 	if (unlikely(vcpu->arch.apicv_active)) {
vcpu              454 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_irr_update(vcpu,
vcpu              466 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu;
vcpu              471 arch/x86/kvm/lapic.c 	vcpu = apic->vcpu;
vcpu              478 arch/x86/kvm/lapic.c 	if (unlikely(vcpu->arch.apicv_active))
vcpu              479 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
vcpu              513 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu;
vcpu              517 arch/x86/kvm/lapic.c 	vcpu = apic->vcpu;
vcpu              526 arch/x86/kvm/lapic.c 	if (unlikely(vcpu->arch.apicv_active))
vcpu              527 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_isr_update(vcpu,
vcpu              536 arch/x86/kvm/lapic.c int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
vcpu              543 arch/x86/kvm/lapic.c 	return apic_find_highest_irr(vcpu->arch.apic);
vcpu              551 arch/x86/kvm/lapic.c int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
vcpu              554 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu              566 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu;
vcpu              595 arch/x86/kvm/lapic.c 			vcpu = map->phys_map[min + i]->vcpu;
vcpu              596 arch/x86/kvm/lapic.c 			count += kvm_apic_set_irq(vcpu, &irq, NULL);
vcpu              608 arch/x86/kvm/lapic.c 			vcpu = map->phys_map[min + i]->vcpu;
vcpu              609 arch/x86/kvm/lapic.c 			count += kvm_apic_set_irq(vcpu, &irq, NULL);
vcpu              618 arch/x86/kvm/lapic.c static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
vcpu              621 arch/x86/kvm/lapic.c 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
vcpu              625 arch/x86/kvm/lapic.c static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
vcpu              628 arch/x86/kvm/lapic.c 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
vcpu              632 arch/x86/kvm/lapic.c static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
vcpu              634 arch/x86/kvm/lapic.c 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
vcpu              637 arch/x86/kvm/lapic.c static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
vcpu              640 arch/x86/kvm/lapic.c 	if (pv_eoi_get_user(vcpu, &val) < 0) {
vcpu              642 arch/x86/kvm/lapic.c 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
vcpu              648 arch/x86/kvm/lapic.c static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
vcpu              650 arch/x86/kvm/lapic.c 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
vcpu              652 arch/x86/kvm/lapic.c 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
vcpu              655 arch/x86/kvm/lapic.c 	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
vcpu              658 arch/x86/kvm/lapic.c static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
vcpu              660 arch/x86/kvm/lapic.c 	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
vcpu              662 arch/x86/kvm/lapic.c 			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
vcpu              665 arch/x86/kvm/lapic.c 	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
vcpu              671 arch/x86/kvm/lapic.c 	if (apic->vcpu->arch.apicv_active)
vcpu              672 arch/x86/kvm/lapic.c 		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
vcpu              708 arch/x86/kvm/lapic.c 		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
vcpu              711 arch/x86/kvm/lapic.c void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
vcpu              713 arch/x86/kvm/lapic.c 	apic_update_ppr(vcpu->arch.apic);
vcpu              791 arch/x86/kvm/lapic.c static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
vcpu              796 arch/x86/kvm/lapic.c 	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
vcpu              803 arch/x86/kvm/lapic.c bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
vcpu              806 arch/x86/kvm/lapic.c 	struct kvm_lapic *target = vcpu->arch.apic;
vcpu              807 arch/x86/kvm/lapic.c 	u32 mda = kvm_apic_mda(vcpu, dest, source, target);
vcpu              922 arch/x86/kvm/lapic.c 			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
vcpu              923 arch/x86/kvm/lapic.c 						(*dst)[lowest]->vcpu) < 0)
vcpu              957 arch/x86/kvm/lapic.c 		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
vcpu              970 arch/x86/kvm/lapic.c 			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
vcpu             1011 arch/x86/kvm/lapic.c 			*dest_vcpu = dst[i]->vcpu;
vcpu             1029 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu = apic->vcpu;
vcpu             1031 arch/x86/kvm/lapic.c 	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
vcpu             1035 arch/x86/kvm/lapic.c 		vcpu->arch.apic_arb_prio++;
vcpu             1048 arch/x86/kvm/lapic.c 			__set_bit(vcpu->vcpu_id, dest_map->map);
vcpu             1049 arch/x86/kvm/lapic.c 			dest_map->vectors[vcpu->vcpu_id] = vector;
vcpu             1061 arch/x86/kvm/lapic.c 		if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
vcpu             1063 arch/x86/kvm/lapic.c 			kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             1064 arch/x86/kvm/lapic.c 			kvm_vcpu_kick(vcpu);
vcpu             1070 arch/x86/kvm/lapic.c 		vcpu->arch.pv.pv_unhalted = 1;
vcpu             1071 arch/x86/kvm/lapic.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             1072 arch/x86/kvm/lapic.c 		kvm_vcpu_kick(vcpu);
vcpu             1077 arch/x86/kvm/lapic.c 		kvm_make_request(KVM_REQ_SMI, vcpu);
vcpu             1078 arch/x86/kvm/lapic.c 		kvm_vcpu_kick(vcpu);
vcpu             1083 arch/x86/kvm/lapic.c 		kvm_inject_nmi(vcpu);
vcpu             1084 arch/x86/kvm/lapic.c 		kvm_vcpu_kick(vcpu);
vcpu             1095 arch/x86/kvm/lapic.c 			kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             1096 arch/x86/kvm/lapic.c 			kvm_vcpu_kick(vcpu);
vcpu             1106 arch/x86/kvm/lapic.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             1107 arch/x86/kvm/lapic.c 		kvm_vcpu_kick(vcpu);
vcpu             1133 arch/x86/kvm/lapic.c 	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
vcpu             1145 arch/x86/kvm/lapic.c 	if (irqchip_split(apic->vcpu->kvm)) {
vcpu             1146 arch/x86/kvm/lapic.c 		apic->vcpu->arch.pending_ioapic_eoi = vector;
vcpu             1147 arch/x86/kvm/lapic.c 		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
vcpu             1156 arch/x86/kvm/lapic.c 	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
vcpu             1175 arch/x86/kvm/lapic.c 	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
vcpu             1176 arch/x86/kvm/lapic.c 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
vcpu             1179 arch/x86/kvm/lapic.c 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
vcpu             1187 arch/x86/kvm/lapic.c void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
vcpu             1189 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1194 arch/x86/kvm/lapic.c 	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
vcpu             1216 arch/x86/kvm/lapic.c 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
vcpu             1246 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu = apic->vcpu;
vcpu             1247 arch/x86/kvm/lapic.c 	struct kvm_run *run = vcpu->run;
vcpu             1249 arch/x86/kvm/lapic.c 	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
vcpu             1250 arch/x86/kvm/lapic.c 	run->tpr_access.rip = kvm_rip_read(vcpu);
vcpu             1256 arch/x86/kvm/lapic.c 	if (apic->vcpu->arch.tpr_access_reporting)
vcpu             1363 arch/x86/kvm/lapic.c static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
vcpu             1373 arch/x86/kvm/lapic.c 		if (!kvm_check_has_quirk(vcpu->kvm,
vcpu             1410 arch/x86/kvm/lapic.c 			    apic->vcpu->vcpu_id,
vcpu             1440 arch/x86/kvm/lapic.c static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
vcpu             1442 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1449 arch/x86/kvm/lapic.c 		if (vcpu->arch.apicv_active)
vcpu             1458 arch/x86/kvm/lapic.c static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
vcpu             1460 arch/x86/kvm/lapic.c 	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
vcpu             1468 arch/x86/kvm/lapic.c 	if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
vcpu             1470 arch/x86/kvm/lapic.c 			nsec_to_cycles(vcpu, timer_advance_ns)));
vcpu             1473 arch/x86/kvm/lapic.c 		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
vcpu             1478 arch/x86/kvm/lapic.c static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
vcpu             1481 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1493 arch/x86/kvm/lapic.c 		do_div(ns, vcpu->arch.virtual_tsc_khz);
vcpu             1498 arch/x86/kvm/lapic.c 		do_div(ns, vcpu->arch.virtual_tsc_khz);
vcpu             1507 arch/x86/kvm/lapic.c static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
vcpu             1509 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1517 arch/x86/kvm/lapic.c 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
vcpu             1521 arch/x86/kvm/lapic.c 		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
vcpu             1524 arch/x86/kvm/lapic.c 		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
vcpu             1527 arch/x86/kvm/lapic.c void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
vcpu             1529 arch/x86/kvm/lapic.c 	if (lapic_timer_int_injected(vcpu))
vcpu             1530 arch/x86/kvm/lapic.c 		__kvm_wait_lapic_expire(vcpu);
vcpu             1549 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu = apic->vcpu;
vcpu             1558 arch/x86/kvm/lapic.c 	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
vcpu             1560 arch/x86/kvm/lapic.c 			__kvm_wait_lapic_expire(vcpu);
vcpu             1566 arch/x86/kvm/lapic.c 	kvm_set_pending_timer(vcpu);
vcpu             1575 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu = apic->vcpu;
vcpu             1576 arch/x86/kvm/lapic.c 	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
vcpu             1586 arch/x86/kvm/lapic.c 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
vcpu             1621 arch/x86/kvm/lapic.c 		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
vcpu             1622 arch/x86/kvm/lapic.c 		nsec_to_cycles(apic->vcpu, ns_remaining_old);
vcpu             1642 arch/x86/kvm/lapic.c 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
vcpu             1643 arch/x86/kvm/lapic.c 		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
vcpu             1666 arch/x86/kvm/lapic.c 	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
vcpu             1667 arch/x86/kvm/lapic.c 		nsec_to_cycles(apic->vcpu, delta);
vcpu             1690 arch/x86/kvm/lapic.c bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
vcpu             1692 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu))
vcpu             1695 arch/x86/kvm/lapic.c 	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
vcpu             1703 arch/x86/kvm/lapic.c 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
vcpu             1710 arch/x86/kvm/lapic.c 	struct kvm_vcpu *vcpu = apic->vcpu;
vcpu             1720 arch/x86/kvm/lapic.c 	if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
vcpu             1744 arch/x86/kvm/lapic.c 	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
vcpu             1763 arch/x86/kvm/lapic.c 	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
vcpu             1779 arch/x86/kvm/lapic.c void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
vcpu             1781 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1787 arch/x86/kvm/lapic.c 	WARN_ON(swait_active(&vcpu->wq));
vcpu             1800 arch/x86/kvm/lapic.c void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
vcpu             1802 arch/x86/kvm/lapic.c 	restart_apic_timer(vcpu->arch.apic);
vcpu             1806 arch/x86/kvm/lapic.c void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
vcpu             1808 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1818 arch/x86/kvm/lapic.c void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
vcpu             1820 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             1844 arch/x86/kvm/lapic.c 			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
vcpu             1846 arch/x86/kvm/lapic.c 			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
vcpu             1883 arch/x86/kvm/lapic.c 			recalculate_apic_map(apic->vcpu->kvm);
vcpu             1993 arch/x86/kvm/lapic.c static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
vcpu             2004 arch/x86/kvm/lapic.c 		if (!kvm_check_has_quirk(vcpu->kvm,
vcpu             2026 arch/x86/kvm/lapic.c void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
vcpu             2028 arch/x86/kvm/lapic.c 	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
vcpu             2033 arch/x86/kvm/lapic.c void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
vcpu             2040 arch/x86/kvm/lapic.c 	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
vcpu             2043 arch/x86/kvm/lapic.c 	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
vcpu             2047 arch/x86/kvm/lapic.c void kvm_free_lapic(struct kvm_vcpu *vcpu)
vcpu             2049 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2051 arch/x86/kvm/lapic.c 	if (!vcpu->arch.apic)
vcpu             2056 arch/x86/kvm/lapic.c 	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
vcpu             2073 arch/x86/kvm/lapic.c u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
vcpu             2075 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2077 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) ||
vcpu             2084 arch/x86/kvm/lapic.c void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
vcpu             2086 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2088 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
vcpu             2097 arch/x86/kvm/lapic.c void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
vcpu             2099 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2105 arch/x86/kvm/lapic.c u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
vcpu             2109 arch/x86/kvm/lapic.c 	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
vcpu             2114 arch/x86/kvm/lapic.c void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
vcpu             2116 arch/x86/kvm/lapic.c 	u64 old_value = vcpu->arch.apic_base;
vcpu             2117 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2122 arch/x86/kvm/lapic.c 	vcpu->arch.apic_base = value;
vcpu             2125 arch/x86/kvm/lapic.c 		kvm_update_cpuid(vcpu);
vcpu             2133 arch/x86/kvm/lapic.c 			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
vcpu             2137 arch/x86/kvm/lapic.c 			recalculate_apic_map(vcpu->kvm);
vcpu             2142 arch/x86/kvm/lapic.c 		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
vcpu             2145 arch/x86/kvm/lapic.c 		kvm_x86_ops->set_virtual_apic_mode(vcpu);
vcpu             2147 arch/x86/kvm/lapic.c 	apic->base_address = apic->vcpu->arch.apic_base &
vcpu             2155 arch/x86/kvm/lapic.c void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu             2157 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2167 arch/x86/kvm/lapic.c 		kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
vcpu             2169 arch/x86/kvm/lapic.c 		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
vcpu             2171 arch/x86/kvm/lapic.c 	kvm_apic_set_version(apic->vcpu);
vcpu             2176 arch/x86/kvm/lapic.c 	if (kvm_vcpu_is_reset_bsp(vcpu) &&
vcpu             2177 arch/x86/kvm/lapic.c 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
vcpu             2197 arch/x86/kvm/lapic.c 	apic->irr_pending = vcpu->arch.apicv_active;
vcpu             2198 arch/x86/kvm/lapic.c 	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
vcpu             2202 arch/x86/kvm/lapic.c 	if (kvm_vcpu_is_bsp(vcpu))
vcpu             2203 arch/x86/kvm/lapic.c 		kvm_lapic_set_base(vcpu,
vcpu             2204 arch/x86/kvm/lapic.c 				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
vcpu             2205 arch/x86/kvm/lapic.c 	vcpu->arch.pv_eoi.msr_val = 0;
vcpu             2207 arch/x86/kvm/lapic.c 	if (vcpu->arch.apicv_active) {
vcpu             2208 arch/x86/kvm/lapic.c 		kvm_x86_ops->apicv_post_state_restore(vcpu);
vcpu             2209 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_irr_update(vcpu, -1);
vcpu             2210 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_isr_update(vcpu, -1);
vcpu             2213 arch/x86/kvm/lapic.c 	vcpu->arch.apic_arb_prio = 0;
vcpu             2214 arch/x86/kvm/lapic.c 	vcpu->arch.apic_attention = 0;
vcpu             2228 arch/x86/kvm/lapic.c int apic_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu             2230 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2253 arch/x86/kvm/lapic.c void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
vcpu             2255 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2281 arch/x86/kvm/lapic.c int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
vcpu             2285 arch/x86/kvm/lapic.c 	ASSERT(vcpu != NULL);
vcpu             2291 arch/x86/kvm/lapic.c 	vcpu->arch.apic = apic;
vcpu             2296 arch/x86/kvm/lapic.c 		       vcpu->vcpu_id);
vcpu             2299 arch/x86/kvm/lapic.c 	apic->vcpu = vcpu;
vcpu             2316 arch/x86/kvm/lapic.c 	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
vcpu             2323 arch/x86/kvm/lapic.c 	vcpu->arch.apic = NULL;
vcpu             2328 arch/x86/kvm/lapic.c int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
vcpu             2330 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2340 arch/x86/kvm/lapic.c int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
vcpu             2342 arch/x86/kvm/lapic.c 	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
vcpu             2345 arch/x86/kvm/lapic.c 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
vcpu             2353 arch/x86/kvm/lapic.c void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
vcpu             2355 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2363 arch/x86/kvm/lapic.c int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
vcpu             2365 arch/x86/kvm/lapic.c 	int vector = kvm_apic_has_interrupt(vcpu);
vcpu             2366 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2380 arch/x86/kvm/lapic.c 	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
vcpu             2401 arch/x86/kvm/lapic.c static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
vcpu             2404 arch/x86/kvm/lapic.c 	if (apic_x2apic_mode(vcpu->arch.apic)) {
vcpu             2408 arch/x86/kvm/lapic.c 		if (vcpu->kvm->arch.x2apic_format) {
vcpu             2409 arch/x86/kvm/lapic.c 			if (*id != vcpu->vcpu_id)
vcpu             2426 arch/x86/kvm/lapic.c int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
vcpu             2428 arch/x86/kvm/lapic.c 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
vcpu             2429 arch/x86/kvm/lapic.c 	return kvm_apic_state_fixup(vcpu, s, false);
vcpu             2432 arch/x86/kvm/lapic.c int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
vcpu             2434 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2438 arch/x86/kvm/lapic.c 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
vcpu             2442 arch/x86/kvm/lapic.c 	r = kvm_apic_state_fixup(vcpu, s, true);
vcpu             2445 arch/x86/kvm/lapic.c 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
vcpu             2447 arch/x86/kvm/lapic.c 	recalculate_apic_map(vcpu->kvm);
vcpu             2448 arch/x86/kvm/lapic.c 	kvm_apic_set_version(vcpu);
vcpu             2457 arch/x86/kvm/lapic.c 	apic->isr_count = vcpu->arch.apicv_active ?
vcpu             2460 arch/x86/kvm/lapic.c 	if (vcpu->arch.apicv_active) {
vcpu             2461 arch/x86/kvm/lapic.c 		kvm_x86_ops->apicv_post_state_restore(vcpu);
vcpu             2462 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_irr_update(vcpu,
vcpu             2464 arch/x86/kvm/lapic.c 		kvm_x86_ops->hwapic_isr_update(vcpu,
vcpu             2467 arch/x86/kvm/lapic.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             2468 arch/x86/kvm/lapic.c 	if (ioapic_in_kernel(vcpu->kvm))
vcpu             2469 arch/x86/kvm/lapic.c 		kvm_rtc_eoi_tracking_restore_one(vcpu);
vcpu             2471 arch/x86/kvm/lapic.c 	vcpu->arch.apic_arb_prio = 0;
vcpu             2476 arch/x86/kvm/lapic.c void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
vcpu             2480 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) ||
vcpu             2481 arch/x86/kvm/lapic.c 		kvm_can_post_timer_interrupt(vcpu))
vcpu             2484 arch/x86/kvm/lapic.c 	timer = &vcpu->arch.apic->lapic_timer.timer;
vcpu             2496 arch/x86/kvm/lapic.c static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
vcpu             2512 arch/x86/kvm/lapic.c 	BUG_ON(!pv_eoi_enabled(vcpu));
vcpu             2513 arch/x86/kvm/lapic.c 	pending = pv_eoi_get_pending(vcpu);
vcpu             2519 arch/x86/kvm/lapic.c 	pv_eoi_clr_pending(vcpu);
vcpu             2526 arch/x86/kvm/lapic.c void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
vcpu             2530 arch/x86/kvm/lapic.c 	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
vcpu             2531 arch/x86/kvm/lapic.c 		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
vcpu             2533 arch/x86/kvm/lapic.c 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
vcpu             2536 arch/x86/kvm/lapic.c 	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
vcpu             2540 arch/x86/kvm/lapic.c 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
vcpu             2549 arch/x86/kvm/lapic.c static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
vcpu             2552 arch/x86/kvm/lapic.c 	if (!pv_eoi_enabled(vcpu) ||
vcpu             2566 arch/x86/kvm/lapic.c 	pv_eoi_set_pending(apic->vcpu);
vcpu             2569 arch/x86/kvm/lapic.c void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
vcpu             2573 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2575 arch/x86/kvm/lapic.c 	apic_sync_pv_eoi_to_guest(vcpu, apic);
vcpu             2577 arch/x86/kvm/lapic.c 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
vcpu             2589 arch/x86/kvm/lapic.c 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
vcpu             2593 arch/x86/kvm/lapic.c int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
vcpu             2596 arch/x86/kvm/lapic.c 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
vcpu             2597 arch/x86/kvm/lapic.c 					&vcpu->arch.apic->vapic_cache,
vcpu             2600 arch/x86/kvm/lapic.c 		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
vcpu             2602 arch/x86/kvm/lapic.c 		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
vcpu             2605 arch/x86/kvm/lapic.c 	vcpu->arch.apic->vapic_addr = vapic_addr;
vcpu             2609 arch/x86/kvm/lapic.c int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
vcpu             2611 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2614 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
vcpu             2626 arch/x86/kvm/lapic.c int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
vcpu             2628 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2631 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
vcpu             2647 arch/x86/kvm/lapic.c int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
vcpu             2649 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2651 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu))
vcpu             2660 arch/x86/kvm/lapic.c int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
vcpu             2662 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2665 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu))
vcpu             2678 arch/x86/kvm/lapic.c int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
vcpu             2681 arch/x86/kvm/lapic.c 	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
vcpu             2687 arch/x86/kvm/lapic.c 	vcpu->arch.pv_eoi.msr_val = data;
vcpu             2688 arch/x86/kvm/lapic.c 	if (!pv_eoi_enabled(vcpu))
vcpu             2696 arch/x86/kvm/lapic.c 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
vcpu             2699 arch/x86/kvm/lapic.c void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
vcpu             2701 arch/x86/kvm/lapic.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             2705 arch/x86/kvm/lapic.c 	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
vcpu             2716 arch/x86/kvm/lapic.c 	if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {
vcpu             2717 arch/x86/kvm/lapic.c 		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
vcpu             2725 arch/x86/kvm/lapic.c 		kvm_vcpu_reset(vcpu, true);
vcpu             2726 arch/x86/kvm/lapic.c 		if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu             2727 arch/x86/kvm/lapic.c 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu             2729 arch/x86/kvm/lapic.c 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
vcpu             2732 arch/x86/kvm/lapic.c 	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
vcpu             2736 arch/x86/kvm/lapic.c 		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
vcpu             2737 arch/x86/kvm/lapic.c 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
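__wait_lapic_expire() above busy-waits out the remaining guest TSC cycles when the timer interrupt is injected slightly ahead of the programmed deadline; when TSC scaling is in use the cycle count is first converted to nanoseconds by dividing by vcpu->arch.virtual_tsc_khz. A one-line helper capturing that arithmetic, assuming the usual cycles-to-nanoseconds conversion (guest_cycles_to_ns is a hypothetical name):

	#include <stdint.h>

	/* virtual_tsc_khz is the guest TSC rate in kHz, i.e. cycles per
	 * millisecond, so: ns = cycles * 1e6 / khz. Multiply before dividing
	 * to avoid losing precision. */
	static inline uint64_t guest_cycles_to_ns(uint64_t cycles, uint32_t virtual_tsc_khz)
	{
		return cycles * 1000000ULL / virtual_tsc_khz;
	}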
vcpu               45 arch/x86/kvm/lapic.h 	struct kvm_vcpu *vcpu;
vcpu               67 arch/x86/kvm/lapic.h int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
vcpu               68 arch/x86/kvm/lapic.h void kvm_free_lapic(struct kvm_vcpu *vcpu);
vcpu               70 arch/x86/kvm/lapic.h int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
vcpu               71 arch/x86/kvm/lapic.h int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
vcpu               72 arch/x86/kvm/lapic.h int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
vcpu               73 arch/x86/kvm/lapic.h void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
vcpu               74 arch/x86/kvm/lapic.h void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
vcpu               75 arch/x86/kvm/lapic.h u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
vcpu               76 arch/x86/kvm/lapic.h void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
vcpu               77 arch/x86/kvm/lapic.h void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
vcpu               78 arch/x86/kvm/lapic.h void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
vcpu               79 arch/x86/kvm/lapic.h u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
vcpu               80 arch/x86/kvm/lapic.h void kvm_apic_set_version(struct kvm_vcpu *vcpu);
vcpu               84 arch/x86/kvm/lapic.h bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
vcpu               88 arch/x86/kvm/lapic.h bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
vcpu               89 arch/x86/kvm/lapic.h void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
vcpu               90 arch/x86/kvm/lapic.h int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
vcpu               97 arch/x86/kvm/lapic.h u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
vcpu               98 arch/x86/kvm/lapic.h int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
vcpu               99 arch/x86/kvm/lapic.h int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
vcpu              100 arch/x86/kvm/lapic.h int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
vcpu              101 arch/x86/kvm/lapic.h enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu);
vcpu              102 arch/x86/kvm/lapic.h int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
vcpu              104 arch/x86/kvm/lapic.h u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
vcpu              105 arch/x86/kvm/lapic.h void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
vcpu              107 arch/x86/kvm/lapic.h void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
vcpu              108 arch/x86/kvm/lapic.h void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
vcpu              110 arch/x86/kvm/lapic.h int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
vcpu              111 arch/x86/kvm/lapic.h void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
vcpu              112 arch/x86/kvm/lapic.h void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
vcpu              114 arch/x86/kvm/lapic.h int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
vcpu              115 arch/x86/kvm/lapic.h int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
vcpu              117 arch/x86/kvm/lapic.h int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
vcpu              118 arch/x86/kvm/lapic.h int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
vcpu              120 arch/x86/kvm/lapic.h static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
vcpu              122 arch/x86/kvm/lapic.h 	return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
vcpu              125 arch/x86/kvm/lapic.h int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
vcpu              164 arch/x86/kvm/lapic.h static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
vcpu              167 arch/x86/kvm/lapic.h 		return vcpu->arch.apic;
vcpu              176 arch/x86/kvm/lapic.h 		return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
vcpu              189 arch/x86/kvm/lapic.h static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
vcpu              191 arch/x86/kvm/lapic.h 	return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
vcpu              194 arch/x86/kvm/lapic.h static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
vcpu              196 arch/x86/kvm/lapic.h 	return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
vcpu              201 arch/x86/kvm/lapic.h 	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
vcpu              204 arch/x86/kvm/lapic.h static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
vcpu              206 arch/x86/kvm/lapic.h 	return vcpu->arch.apic && vcpu->arch.apicv_active;
vcpu              209 arch/x86/kvm/lapic.h static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
vcpu              211 arch/x86/kvm/lapic.h 	return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
vcpu              220 arch/x86/kvm/lapic.h static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
vcpu              222 arch/x86/kvm/lapic.h 	return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
vcpu              225 arch/x86/kvm/lapic.h bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
vcpu              227 arch/x86/kvm/lapic.h void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
vcpu              233 arch/x86/kvm/lapic.h void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
vcpu              234 arch/x86/kvm/lapic.h void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
vcpu              235 arch/x86/kvm/lapic.h void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
vcpu              236 arch/x86/kvm/lapic.h bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
vcpu              237 arch/x86/kvm/lapic.h void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
vcpu              238 arch/x86/kvm/lapic.h bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu);
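The lapic.h inline helpers above layer the APIC state checks: lapic_in_kernel() tests whether an in-kernel APIC object exists, kvm_apic_present() additionally requires the hardware-enable bit in the APIC base MSR, and kvm_lapic_enabled() further requires the software-enable check. A hedged restatement of that layering as plain predicates (the demo_* names are hypothetical, not KVM code):

	#include <stdbool.h>

	struct demo_apic_state {
		bool in_kernel;   /* vcpu->arch.apic != NULL */
		bool hw_enabled;  /* MSR_IA32_APICBASE_ENABLE set */
		bool sw_enabled;  /* APIC software enable set */
	};

	static bool demo_apic_present(const struct demo_apic_state *s)
	{
		return s->in_kernel && s->hw_enabled;
	}

	static bool demo_lapic_enabled(const struct demo_apic_state *s)
	{
		return demo_apic_present(s) && s->sw_enabled;
	}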
vcpu              308 arch/x86/kvm/mmu.c kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
vcpu              364 arch/x86/kvm/mmu.c static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
vcpu              372 arch/x86/kvm/mmu.c 	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
vcpu              456 arch/x86/kvm/mmu.c static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
vcpu              459 arch/x86/kvm/mmu.c 	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
vcpu              488 arch/x86/kvm/mmu.c static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
vcpu              492 arch/x86/kvm/mmu.c 		mark_mmio_spte(vcpu, sptep, gfn, access);
vcpu              499 arch/x86/kvm/mmu.c static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
vcpu              503 arch/x86/kvm/mmu.c 	gen = kvm_vcpu_memslots(vcpu)->generation;
vcpu              604 arch/x86/kvm/mmu.c static int is_nx(struct kvm_vcpu *vcpu)
vcpu              606 arch/x86/kvm/mmu.c 	return vcpu->arch.efer & EFER_NX;
vcpu             1034 arch/x86/kvm/mmu.c static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
vcpu             1046 arch/x86/kvm/mmu.c 	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
vcpu             1049 arch/x86/kvm/mmu.c static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
vcpu             1056 arch/x86/kvm/mmu.c 	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
vcpu             1110 arch/x86/kvm/mmu.c static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
vcpu             1114 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
vcpu             1118 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
vcpu             1121 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
vcpu             1127 arch/x86/kvm/mmu.c static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
vcpu             1129 arch/x86/kvm/mmu.c 	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
vcpu             1131 arch/x86/kvm/mmu.c 	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
vcpu             1132 arch/x86/kvm/mmu.c 	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
vcpu             1145 arch/x86/kvm/mmu.c static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
vcpu             1147 arch/x86/kvm/mmu.c 	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
vcpu             1281 arch/x86/kvm/mmu.c static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             1286 arch/x86/kvm/mmu.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             1290 arch/x86/kvm/mmu.c static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1295 arch/x86/kvm/mmu.c 	page_size = kvm_host_page_size(vcpu, gfn);
vcpu             1319 arch/x86/kvm/mmu.c gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             1324 arch/x86/kvm/mmu.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             1331 arch/x86/kvm/mmu.c static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
vcpu             1340 arch/x86/kvm/mmu.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
vcpu             1345 arch/x86/kvm/mmu.c 	host_level = host_mapping_level(vcpu, large_gfn);
vcpu             1370 arch/x86/kvm/mmu.c static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
vcpu             1381 arch/x86/kvm/mmu.c 		desc = mmu_alloc_pte_list_desc(vcpu);
vcpu             1394 arch/x86/kvm/mmu.c 			desc->more = mmu_alloc_pte_list_desc(vcpu);
vcpu             1489 arch/x86/kvm/mmu.c static bool rmap_can_add(struct kvm_vcpu *vcpu)
vcpu             1493 arch/x86/kvm/mmu.c 	cache = &vcpu->arch.mmu_pte_list_desc_cache;
vcpu             1497 arch/x86/kvm/mmu.c static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
vcpu             1504 arch/x86/kvm/mmu.c 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
vcpu             1505 arch/x86/kvm/mmu.c 	return pte_list_add(vcpu, spte, rmap_head);
vcpu             1616 arch/x86/kvm/mmu.c static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
vcpu             1618 arch/x86/kvm/mmu.c 	if (__drop_large_spte(vcpu->kvm, sptep)) {
vcpu             1621 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
vcpu             1822 arch/x86/kvm/mmu.c int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
vcpu             1825 arch/x86/kvm/mmu.c 		return kvm_x86_ops->write_log_dirty(vcpu);
vcpu             1845 arch/x86/kvm/mmu.c static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
vcpu             1849 arch/x86/kvm/mmu.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             1850 arch/x86/kvm/mmu.c 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
vcpu             2088 arch/x86/kvm/mmu.c static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
vcpu             2095 arch/x86/kvm/mmu.c 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
vcpu             2097 arch/x86/kvm/mmu.c 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
vcpu             2098 arch/x86/kvm/mmu.c 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
vcpu             2156 arch/x86/kvm/mmu.c static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
vcpu             2162 arch/x86/kvm/mmu.c 	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
vcpu             2178 arch/x86/kvm/mmu.c static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
vcpu             2182 arch/x86/kvm/mmu.c 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
vcpu             2183 arch/x86/kvm/mmu.c 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
vcpu             2185 arch/x86/kvm/mmu.c 		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
vcpu             2193 arch/x86/kvm/mmu.c 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
vcpu             2194 arch/x86/kvm/mmu.c 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
vcpu             2195 arch/x86/kvm/mmu.c 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
vcpu             2224 arch/x86/kvm/mmu.c static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
vcpu             2230 arch/x86/kvm/mmu.c static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
vcpu             2234 arch/x86/kvm/mmu.c static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
vcpu             2356 arch/x86/kvm/mmu.c static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu             2359 arch/x86/kvm/mmu.c 	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
vcpu             2360 arch/x86/kvm/mmu.c 	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
vcpu             2361 arch/x86/kvm/mmu.c 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
vcpu             2382 arch/x86/kvm/mmu.c static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
vcpu             2386 arch/x86/kvm/mmu.c 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
vcpu             2390 arch/x86/kvm/mmu.c 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             2396 arch/x86/kvm/mmu.c static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
vcpu             2406 arch/x86/kvm/mmu.c static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu             2409 arch/x86/kvm/mmu.c 	kvm_unlink_unsync_page(vcpu->kvm, sp);
vcpu             2410 arch/x86/kvm/mmu.c 	return __kvm_sync_page(vcpu, sp, invalid_list);
vcpu             2414 arch/x86/kvm/mmu.c static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             2420 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
vcpu             2425 arch/x86/kvm/mmu.c 		ret |= kvm_sync_page(vcpu, s, invalid_list);
vcpu             2503 arch/x86/kvm/mmu.c static void mmu_sync_children(struct kvm_vcpu *vcpu,
vcpu             2517 arch/x86/kvm/mmu.c 			protected |= rmap_write_protect(vcpu, sp->gfn);
vcpu             2520 arch/x86/kvm/mmu.c 			kvm_flush_remote_tlbs(vcpu->kvm);
vcpu             2525 arch/x86/kvm/mmu.c 			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
vcpu             2528 arch/x86/kvm/mmu.c 		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
vcpu             2529 arch/x86/kvm/mmu.c 			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
vcpu             2530 arch/x86/kvm/mmu.c 			cond_resched_lock(&vcpu->kvm->mmu_lock);
vcpu             2535 arch/x86/kvm/mmu.c 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
vcpu             2550 arch/x86/kvm/mmu.c static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
vcpu             2565 arch/x86/kvm/mmu.c 	role = vcpu->arch.mmu->mmu_role.base;
vcpu             2571 arch/x86/kvm/mmu.c 	if (!vcpu->arch.mmu->direct_map
vcpu             2572 arch/x86/kvm/mmu.c 	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
vcpu             2577 arch/x86/kvm/mmu.c 	for_each_valid_sp(vcpu->kvm, sp, gfn) {
vcpu             2593 arch/x86/kvm/mmu.c 			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
vcpu             2597 arch/x86/kvm/mmu.c 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             2601 arch/x86/kvm/mmu.c 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
vcpu             2608 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_cache_miss;
vcpu             2610 arch/x86/kvm/mmu.c 	sp = kvm_mmu_alloc_page(vcpu, direct);
vcpu             2615 arch/x86/kvm/mmu.c 		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
vcpu             2622 arch/x86/kvm/mmu.c 		account_shadowed(vcpu->kvm, sp);
vcpu             2624 arch/x86/kvm/mmu.c 		      rmap_write_protect(vcpu, gfn))
vcpu             2625 arch/x86/kvm/mmu.c 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
vcpu             2628 arch/x86/kvm/mmu.c 			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
vcpu             2633 arch/x86/kvm/mmu.c 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
vcpu             2635 arch/x86/kvm/mmu.c 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
vcpu             2636 arch/x86/kvm/mmu.c 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
vcpu             2641 arch/x86/kvm/mmu.c 					struct kvm_vcpu *vcpu, hpa_t root,
vcpu             2646 arch/x86/kvm/mmu.c 	iterator->level = vcpu->arch.mmu->shadow_root_level;
vcpu             2649 arch/x86/kvm/mmu.c 	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
vcpu             2650 arch/x86/kvm/mmu.c 	    !vcpu->arch.mmu->direct_map)
vcpu             2658 arch/x86/kvm/mmu.c 		BUG_ON(root != vcpu->arch.mmu->root_hpa);
vcpu             2661 arch/x86/kvm/mmu.c 			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
vcpu             2670 arch/x86/kvm/mmu.c 			     struct kvm_vcpu *vcpu, u64 addr)
vcpu             2672 arch/x86/kvm/mmu.c 	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
vcpu             2703 arch/x86/kvm/mmu.c static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
vcpu             2720 arch/x86/kvm/mmu.c 	mmu_page_add_parent_pte(vcpu, sp, sptep);
vcpu             2726 arch/x86/kvm/mmu.c static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
vcpu             2744 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
vcpu             2954 arch/x86/kvm/mmu.c static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
vcpu             2957 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_unsync;
vcpu             2963 arch/x86/kvm/mmu.c static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             2968 arch/x86/kvm/mmu.c 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
vcpu             2971 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
vcpu             2979 arch/x86/kvm/mmu.c 		kvm_unsync_page(vcpu, sp);
vcpu             3049 arch/x86/kvm/mmu.c static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
vcpu             3058 arch/x86/kvm/mmu.c 	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
vcpu             3064 arch/x86/kvm/mmu.c 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
vcpu             3093 arch/x86/kvm/mmu.c 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
vcpu             3115 arch/x86/kvm/mmu.c 		    mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
vcpu             3129 arch/x86/kvm/mmu.c 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
vcpu             3139 arch/x86/kvm/mmu.c 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
vcpu             3153 arch/x86/kvm/mmu.c static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
vcpu             3182 arch/x86/kvm/mmu.c 			drop_spte(vcpu->kvm, sptep);
vcpu             3188 arch/x86/kvm/mmu.c 	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
vcpu             3193 arch/x86/kvm/mmu.c 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             3197 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
vcpu             3206 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.lpages;
vcpu             3210 arch/x86/kvm/mmu.c 			rmap_count = rmap_add(vcpu, sptep, gfn);
vcpu             3212 arch/x86/kvm/mmu.c 				rmap_recycle(vcpu, sptep, gfn);
vcpu             3219 arch/x86/kvm/mmu.c static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             3224 arch/x86/kvm/mmu.c 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
vcpu             3231 arch/x86/kvm/mmu.c static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
vcpu             3242 arch/x86/kvm/mmu.c 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
vcpu             3251 arch/x86/kvm/mmu.c 		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
vcpu             3259 arch/x86/kvm/mmu.c static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
vcpu             3274 arch/x86/kvm/mmu.c 			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
vcpu             3282 arch/x86/kvm/mmu.c static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
vcpu             3299 arch/x86/kvm/mmu.c 	__direct_pte_prefetch(vcpu, sp, sptep);
vcpu             3325 arch/x86/kvm/mmu.c static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
vcpu             3335 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu             3339 arch/x86/kvm/mmu.c 	for_each_shadow_entry(vcpu, gpa, it) {
vcpu             3350 arch/x86/kvm/mmu.c 		drop_large_spte(vcpu, it.sptep);
vcpu             3352 arch/x86/kvm/mmu.c 			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
vcpu             3355 arch/x86/kvm/mmu.c 			link_shadow_page(vcpu, it.sptep, sp);
vcpu             3357 arch/x86/kvm/mmu.c 				account_huge_nx_page(vcpu->kvm, sp);
vcpu             3361 arch/x86/kvm/mmu.c 	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
vcpu             3364 arch/x86/kvm/mmu.c 	direct_pte_prefetch(vcpu, it.sptep);
vcpu             3365 arch/x86/kvm/mmu.c 	++vcpu->stat.pf_fixed;
vcpu             3374 arch/x86/kvm/mmu.c static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
vcpu             3385 arch/x86/kvm/mmu.c 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
vcpu             3392 arch/x86/kvm/mmu.c static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
vcpu             3408 arch/x86/kvm/mmu.c 	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
vcpu             3431 arch/x86/kvm/mmu.c static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
vcpu             3436 arch/x86/kvm/mmu.c 		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
vcpu             3441 arch/x86/kvm/mmu.c 		vcpu_cache_mmio_info(vcpu, gva, gfn,
vcpu             3485 arch/x86/kvm/mmu.c fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu             3513 arch/x86/kvm/mmu.c 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
vcpu             3536 arch/x86/kvm/mmu.c static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
vcpu             3545 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu             3551 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_begin(vcpu);
vcpu             3556 arch/x86/kvm/mmu.c 		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
vcpu             3620 arch/x86/kvm/mmu.c 		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
vcpu             3634 arch/x86/kvm/mmu.c 	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
vcpu             3636 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_end(vcpu);
vcpu             3641 arch/x86/kvm/mmu.c static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
vcpu             3644 arch/x86/kvm/mmu.c static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
vcpu             3646 arch/x86/kvm/mmu.c static int nonpaging_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
vcpu             3659 arch/x86/kvm/mmu.c 	level = mapping_level(vcpu, gfn, &force_pt_level);
vcpu             3672 arch/x86/kvm/mmu.c 	if (fast_page_fault(vcpu, gpa, level, error_code))
vcpu             3675 arch/x86/kvm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
vcpu             3678 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
vcpu             3681 arch/x86/kvm/mmu.c 	if (handle_abnormal_pfn(vcpu, gpa, gfn, pfn, ACC_ALL, &r))
vcpu             3685 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3686 arch/x86/kvm/mmu.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
vcpu             3688 arch/x86/kvm/mmu.c 	if (make_mmu_pages_available(vcpu) < 0)
vcpu             3691 arch/x86/kvm/mmu.c 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
vcpu             3692 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
vcpu             3695 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3717 arch/x86/kvm/mmu.c void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu             3737 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3741 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
vcpu             3747 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
vcpu             3752 arch/x86/kvm/mmu.c 					mmu_free_root_page(vcpu->kvm,
vcpu             3760 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
vcpu             3761 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3765 arch/x86/kvm/mmu.c static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
vcpu             3769 arch/x86/kvm/mmu.c 	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
vcpu             3770 arch/x86/kvm/mmu.c 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu             3777 arch/x86/kvm/mmu.c static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu             3782 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
vcpu             3783 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3784 arch/x86/kvm/mmu.c 		if(make_mmu_pages_available(vcpu) < 0) {
vcpu             3785 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3788 arch/x86/kvm/mmu.c 		sp = kvm_mmu_get_page(vcpu, 0, 0,
vcpu             3789 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
vcpu             3791 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3792 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
vcpu             3793 arch/x86/kvm/mmu.c 	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
vcpu             3795 arch/x86/kvm/mmu.c 			hpa_t root = vcpu->arch.mmu->pae_root[i];
vcpu             3798 arch/x86/kvm/mmu.c 			spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3799 arch/x86/kvm/mmu.c 			if (make_mmu_pages_available(vcpu) < 0) {
vcpu             3800 arch/x86/kvm/mmu.c 				spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3803 arch/x86/kvm/mmu.c 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
vcpu             3807 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3808 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
vcpu             3810 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
vcpu             3813 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
vcpu             3818 arch/x86/kvm/mmu.c static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
vcpu             3825 arch/x86/kvm/mmu.c 	root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
vcpu             3828 arch/x86/kvm/mmu.c 	if (mmu_check_root(vcpu, root_gfn))
vcpu             3835 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
vcpu             3836 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
vcpu             3840 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3841 arch/x86/kvm/mmu.c 		if (make_mmu_pages_available(vcpu) < 0) {
vcpu             3842 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3845 arch/x86/kvm/mmu.c 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
vcpu             3846 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
vcpu             3849 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3850 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = root;
vcpu             3860 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
vcpu             3864 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
vcpu             3867 arch/x86/kvm/mmu.c 		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
vcpu             3868 arch/x86/kvm/mmu.c 			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
vcpu             3870 arch/x86/kvm/mmu.c 				vcpu->arch.mmu->pae_root[i] = 0;
vcpu             3874 arch/x86/kvm/mmu.c 			if (mmu_check_root(vcpu, root_gfn))
vcpu             3877 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3878 arch/x86/kvm/mmu.c 		if (make_mmu_pages_available(vcpu) < 0) {
vcpu             3879 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3882 arch/x86/kvm/mmu.c 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
vcpu             3886 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3888 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
vcpu             3890 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
vcpu             3896 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
vcpu             3897 arch/x86/kvm/mmu.c 		if (vcpu->arch.mmu->lm_root == NULL) {
vcpu             3909 arch/x86/kvm/mmu.c 			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
vcpu             3911 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->lm_root = lm_root;
vcpu             3914 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
vcpu             3918 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->root_cr3 = root_cr3;
vcpu             3923 arch/x86/kvm/mmu.c static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
vcpu             3925 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
vcpu             3926 arch/x86/kvm/mmu.c 		return mmu_alloc_direct_roots(vcpu);
vcpu             3928 arch/x86/kvm/mmu.c 		return mmu_alloc_shadow_roots(vcpu);
vcpu             3931 arch/x86/kvm/mmu.c void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
vcpu             3936 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
vcpu             3939 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu             3942 arch/x86/kvm/mmu.c 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
vcpu             3944 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
vcpu             3945 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
vcpu             3962 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3963 arch/x86/kvm/mmu.c 		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
vcpu             3965 arch/x86/kvm/mmu.c 		mmu_sync_children(vcpu, sp);
vcpu             3967 arch/x86/kvm/mmu.c 		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
vcpu             3968 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3972 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             3973 arch/x86/kvm/mmu.c 	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
vcpu             3976 arch/x86/kvm/mmu.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
vcpu             3981 arch/x86/kvm/mmu.c 			mmu_sync_children(vcpu, sp);
vcpu             3985 arch/x86/kvm/mmu.c 	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
vcpu             3986 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             3990 arch/x86/kvm/mmu.c static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
vcpu             3998 arch/x86/kvm/mmu.c static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
vcpu             4004 arch/x86/kvm/mmu.c 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
vcpu             4026 arch/x86/kvm/mmu.c static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
vcpu             4032 arch/x86/kvm/mmu.c 	if (mmu_is_nested(vcpu))
vcpu             4036 arch/x86/kvm/mmu.c 		return vcpu_match_mmio_gpa(vcpu, addr);
vcpu             4038 arch/x86/kvm/mmu.c 	return vcpu_match_mmio_gva(vcpu, addr);
vcpu             4043 arch/x86/kvm/mmu.c walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
vcpu             4050 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu             4053 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_begin(vcpu);
vcpu             4055 arch/x86/kvm/mmu.c 	for (shadow_walk_init(&iterator, vcpu, addr),
vcpu             4067 arch/x86/kvm/mmu.c 		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
vcpu             4071 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_end(vcpu);
vcpu             4087 arch/x86/kvm/mmu.c static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
vcpu             4092 arch/x86/kvm/mmu.c 	if (mmio_info_in_cache(vcpu, addr, direct))
vcpu             4095 arch/x86/kvm/mmu.c 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
vcpu             4103 arch/x86/kvm/mmu.c 		if (!check_mmio_spte(vcpu, spte))
vcpu             4110 arch/x86/kvm/mmu.c 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
vcpu             4121 arch/x86/kvm/mmu.c static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
vcpu             4135 arch/x86/kvm/mmu.c 	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
vcpu             4141 arch/x86/kvm/mmu.c static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
vcpu             4146 arch/x86/kvm/mmu.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu             4149 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_begin(vcpu);
vcpu             4150 arch/x86/kvm/mmu.c 	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
vcpu             4155 arch/x86/kvm/mmu.c 	walk_shadow_page_lockless_end(vcpu);
vcpu             4158 arch/x86/kvm/mmu.c static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             4167 arch/x86/kvm/mmu.c 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
vcpu             4170 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_caches(vcpu);
vcpu             4174 arch/x86/kvm/mmu.c 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
vcpu             4177 arch/x86/kvm/mmu.c 	return nonpaging_map(vcpu, gpa & PAGE_MASK,
vcpu             4181 arch/x86/kvm/mmu.c static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu             4186 arch/x86/kvm/mmu.c 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
vcpu             4188 arch/x86/kvm/mmu.c 	arch.direct_map = vcpu->arch.mmu->direct_map;
vcpu             4189 arch/x86/kvm/mmu.c 	arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
vcpu             4191 arch/x86/kvm/mmu.c 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
vcpu             4192 arch/x86/kvm/mmu.c 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
vcpu             4195 arch/x86/kvm/mmu.c static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
vcpu             4205 arch/x86/kvm/mmu.c 	if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
vcpu             4210 arch/x86/kvm/mmu.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             4216 arch/x86/kvm/mmu.c 	if (!prefault && kvm_can_do_async_pf(vcpu)) {
vcpu             4218 arch/x86/kvm/mmu.c 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
vcpu             4220 arch/x86/kvm/mmu.c 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
vcpu             4222 arch/x86/kvm/mmu.c 		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
vcpu             4230 arch/x86/kvm/mmu.c int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
vcpu             4241 arch/x86/kvm/mmu.c 	vcpu->arch.l1tf_flush_l1d = true;
vcpu             4242 arch/x86/kvm/mmu.c 	switch (vcpu->arch.apf.host_apf_reason) {
vcpu             4246 arch/x86/kvm/mmu.c 		if (kvm_event_needs_reinjection(vcpu))
vcpu             4247 arch/x86/kvm/mmu.c 			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
vcpu             4248 arch/x86/kvm/mmu.c 		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
vcpu             4252 arch/x86/kvm/mmu.c 		vcpu->arch.apf.host_apf_reason = 0;
vcpu             4258 arch/x86/kvm/mmu.c 		vcpu->arch.apf.host_apf_reason = 0;
vcpu             4269 arch/x86/kvm/mmu.c check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
vcpu             4275 arch/x86/kvm/mmu.c 	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
vcpu             4278 arch/x86/kvm/mmu.c static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
vcpu             4292 arch/x86/kvm/mmu.c 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
vcpu             4294 arch/x86/kvm/mmu.c 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
vcpu             4297 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_caches(vcpu);
vcpu             4303 arch/x86/kvm/mmu.c 		!check_hugepage_cache_consistency(vcpu, gfn, PT_DIRECTORY_LEVEL);
vcpu             4304 arch/x86/kvm/mmu.c 	level = mapping_level(vcpu, gfn, &force_pt_level);
vcpu             4307 arch/x86/kvm/mmu.c 		    !check_hugepage_cache_consistency(vcpu, gfn, level))
vcpu             4312 arch/x86/kvm/mmu.c 	if (fast_page_fault(vcpu, gpa, level, error_code))
vcpu             4315 arch/x86/kvm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
vcpu             4318 arch/x86/kvm/mmu.c 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
vcpu             4321 arch/x86/kvm/mmu.c 	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
vcpu             4325 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             4326 arch/x86/kvm/mmu.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
vcpu             4328 arch/x86/kvm/mmu.c 	if (make_mmu_pages_available(vcpu) < 0)
vcpu             4331 arch/x86/kvm/mmu.c 		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
vcpu             4332 arch/x86/kvm/mmu.c 	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn,
vcpu             4335 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             4340 arch/x86/kvm/mmu.c static void nonpaging_init_context(struct kvm_vcpu *vcpu,
vcpu             4362 arch/x86/kvm/mmu.c static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
vcpu             4367 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
vcpu             4387 arch/x86/kvm/mmu.c static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
vcpu             4391 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
vcpu             4400 arch/x86/kvm/mmu.c 		if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
vcpu             4403 arch/x86/kvm/mmu.c 		if (cached_root_available(vcpu, new_cr3, new_role)) {
vcpu             4411 arch/x86/kvm/mmu.c 			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
vcpu             4413 arch/x86/kvm/mmu.c 				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
vcpu             4414 arch/x86/kvm/mmu.c 				kvm_x86_ops->tlb_flush(vcpu, true);
vcpu             4424 arch/x86/kvm/mmu.c 			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
vcpu             4436 arch/x86/kvm/mmu.c static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
vcpu             4440 arch/x86/kvm/mmu.c 	if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
vcpu             4441 arch/x86/kvm/mmu.c 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
vcpu             4445 arch/x86/kvm/mmu.c void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
vcpu             4447 arch/x86/kvm/mmu.c 	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
vcpu             4452 arch/x86/kvm/mmu.c static unsigned long get_cr3(struct kvm_vcpu *vcpu)
vcpu             4454 arch/x86/kvm/mmu.c 	return kvm_read_cr3(vcpu);
vcpu             4457 arch/x86/kvm/mmu.c static void inject_page_fault(struct kvm_vcpu *vcpu,
vcpu             4460 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
vcpu             4463 arch/x86/kvm/mmu.c static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
vcpu             4473 arch/x86/kvm/mmu.c 		mark_mmio_spte(vcpu, sptep, gfn, access);
vcpu             4514 arch/x86/kvm/mmu.c __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
vcpu             4603 arch/x86/kvm/mmu.c static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
vcpu             4606 arch/x86/kvm/mmu.c 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
vcpu             4607 arch/x86/kvm/mmu.c 				cpuid_maxphyaddr(vcpu), context->root_level,
vcpu             4609 arch/x86/kvm/mmu.c 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
vcpu             4610 arch/x86/kvm/mmu.c 				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
vcpu             4650 arch/x86/kvm/mmu.c static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
vcpu             4654 arch/x86/kvm/mmu.c 				    cpuid_maxphyaddr(vcpu), execonly);
vcpu             4663 arch/x86/kvm/mmu.c reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
vcpu             4675 arch/x86/kvm/mmu.c 	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
vcpu             4678 arch/x86/kvm/mmu.c 				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
vcpu             4679 arch/x86/kvm/mmu.c 				is_pse(vcpu), true);
vcpu             4703 arch/x86/kvm/mmu.c reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
vcpu             4712 arch/x86/kvm/mmu.c 		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
vcpu             4736 arch/x86/kvm/mmu.c reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
vcpu             4753 arch/x86/kvm/mmu.c static void update_permission_bitmask(struct kvm_vcpu *vcpu,
vcpu             4762 arch/x86/kvm/mmu.c 	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
vcpu             4763 arch/x86/kvm/mmu.c 	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
vcpu             4764 arch/x86/kvm/mmu.c 	bool cr0_wp = is_write_protection(vcpu);
vcpu             4849 arch/x86/kvm/mmu.c static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu             4861 arch/x86/kvm/mmu.c 	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
vcpu             4866 arch/x86/kvm/mmu.c 	wp = is_write_protection(vcpu);
vcpu             4900 arch/x86/kvm/mmu.c static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
vcpu             4905 arch/x86/kvm/mmu.c 	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
vcpu             4909 arch/x86/kvm/mmu.c static void paging64_init_context_common(struct kvm_vcpu *vcpu,
vcpu             4913 arch/x86/kvm/mmu.c 	context->nx = is_nx(vcpu);
vcpu             4916 arch/x86/kvm/mmu.c 	reset_rsvds_bits_mask(vcpu, context);
vcpu             4917 arch/x86/kvm/mmu.c 	update_permission_bitmask(vcpu, context, false);
vcpu             4918 arch/x86/kvm/mmu.c 	update_pkru_bitmask(vcpu, context, false);
vcpu             4919 arch/x86/kvm/mmu.c 	update_last_nonleaf_level(vcpu, context);
vcpu             4921 arch/x86/kvm/mmu.c 	MMU_WARN_ON(!is_pae(vcpu));
vcpu             4931 arch/x86/kvm/mmu.c static void paging64_init_context(struct kvm_vcpu *vcpu,
vcpu             4934 arch/x86/kvm/mmu.c 	int root_level = is_la57_mode(vcpu) ?
vcpu             4937 arch/x86/kvm/mmu.c 	paging64_init_context_common(vcpu, context, root_level);
vcpu             4940 arch/x86/kvm/mmu.c static void paging32_init_context(struct kvm_vcpu *vcpu,
vcpu             4946 arch/x86/kvm/mmu.c 	reset_rsvds_bits_mask(vcpu, context);
vcpu             4947 arch/x86/kvm/mmu.c 	update_permission_bitmask(vcpu, context, false);
vcpu             4948 arch/x86/kvm/mmu.c 	update_pkru_bitmask(vcpu, context, false);
vcpu             4949 arch/x86/kvm/mmu.c 	update_last_nonleaf_level(vcpu, context);
vcpu             4960 arch/x86/kvm/mmu.c static void paging32E_init_context(struct kvm_vcpu *vcpu,
vcpu             4963 arch/x86/kvm/mmu.c 	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
vcpu             4966 arch/x86/kvm/mmu.c static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
vcpu             4970 arch/x86/kvm/mmu.c 	ext.cr0_pg = !!is_paging(vcpu);
vcpu             4971 arch/x86/kvm/mmu.c 	ext.cr4_pae = !!is_pae(vcpu);
vcpu             4972 arch/x86/kvm/mmu.c 	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
vcpu             4973 arch/x86/kvm/mmu.c 	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
vcpu             4974 arch/x86/kvm/mmu.c 	ext.cr4_pse = !!is_pse(vcpu);
vcpu             4975 arch/x86/kvm/mmu.c 	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
vcpu             4976 arch/x86/kvm/mmu.c 	ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
vcpu             4977 arch/x86/kvm/mmu.c 	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
vcpu             4984 arch/x86/kvm/mmu.c static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
vcpu             4990 arch/x86/kvm/mmu.c 	role.base.nxe = !!is_nx(vcpu);
vcpu             4991 arch/x86/kvm/mmu.c 	role.base.cr0_wp = is_write_protection(vcpu);
vcpu             4992 arch/x86/kvm/mmu.c 	role.base.smm = is_smm(vcpu);
vcpu             4993 arch/x86/kvm/mmu.c 	role.base.guest_mode = is_guest_mode(vcpu);
vcpu             4998 arch/x86/kvm/mmu.c 	role.ext = kvm_calc_mmu_role_ext(vcpu);
vcpu             5004 arch/x86/kvm/mmu.c kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
vcpu             5006 arch/x86/kvm/mmu.c 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
vcpu             5009 arch/x86/kvm/mmu.c 	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
vcpu             5016 arch/x86/kvm/mmu.c static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
vcpu             5018 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
vcpu             5020 arch/x86/kvm/mmu.c 		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
vcpu             5031 arch/x86/kvm/mmu.c 	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
vcpu             5038 arch/x86/kvm/mmu.c 	if (!is_paging(vcpu)) {
vcpu             5042 arch/x86/kvm/mmu.c 	} else if (is_long_mode(vcpu)) {
vcpu             5043 arch/x86/kvm/mmu.c 		context->nx = is_nx(vcpu);
vcpu             5044 arch/x86/kvm/mmu.c 		context->root_level = is_la57_mode(vcpu) ?
vcpu             5046 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, context);
vcpu             5048 arch/x86/kvm/mmu.c 	} else if (is_pae(vcpu)) {
vcpu             5049 arch/x86/kvm/mmu.c 		context->nx = is_nx(vcpu);
vcpu             5051 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, context);
vcpu             5056 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, context);
vcpu             5060 arch/x86/kvm/mmu.c 	update_permission_bitmask(vcpu, context, false);
vcpu             5061 arch/x86/kvm/mmu.c 	update_pkru_bitmask(vcpu, context, false);
vcpu             5062 arch/x86/kvm/mmu.c 	update_last_nonleaf_level(vcpu, context);
vcpu             5063 arch/x86/kvm/mmu.c 	reset_tdp_shadow_zero_bits_mask(vcpu, context);
vcpu             5067 arch/x86/kvm/mmu.c kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
vcpu             5069 arch/x86/kvm/mmu.c 	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
vcpu             5072 arch/x86/kvm/mmu.c 		!is_write_protection(vcpu);
vcpu             5074 arch/x86/kvm/mmu.c 		!is_write_protection(vcpu);
vcpu             5075 arch/x86/kvm/mmu.c 	role.base.direct = !is_paging(vcpu);
vcpu             5076 arch/x86/kvm/mmu.c 	role.base.gpte_is_8_bytes = !!is_pae(vcpu);
vcpu             5078 arch/x86/kvm/mmu.c 	if (!is_long_mode(vcpu))
vcpu             5080 arch/x86/kvm/mmu.c 	else if (is_la57_mode(vcpu))
vcpu             5088 arch/x86/kvm/mmu.c void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
vcpu             5090 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
vcpu             5092 arch/x86/kvm/mmu.c 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
vcpu             5098 arch/x86/kvm/mmu.c 	if (!is_paging(vcpu))
vcpu             5099 arch/x86/kvm/mmu.c 		nonpaging_init_context(vcpu, context);
vcpu             5100 arch/x86/kvm/mmu.c 	else if (is_long_mode(vcpu))
vcpu             5101 arch/x86/kvm/mmu.c 		paging64_init_context(vcpu, context);
vcpu             5102 arch/x86/kvm/mmu.c 	else if (is_pae(vcpu))
vcpu             5103 arch/x86/kvm/mmu.c 		paging32E_init_context(vcpu, context);
vcpu             5105 arch/x86/kvm/mmu.c 		paging32_init_context(vcpu, context);
vcpu             5108 arch/x86/kvm/mmu.c 	reset_shadow_zero_bits_mask(vcpu, context);
vcpu             5113 arch/x86/kvm/mmu.c kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
vcpu             5119 arch/x86/kvm/mmu.c 	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
vcpu             5135 arch/x86/kvm/mmu.c 	role.ext = kvm_calc_mmu_role_ext(vcpu);
vcpu             5141 arch/x86/kvm/mmu.c void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
vcpu             5144 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
vcpu             5146 arch/x86/kvm/mmu.c 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
vcpu             5149 arch/x86/kvm/mmu.c 	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
vcpu             5168 arch/x86/kvm/mmu.c 	update_permission_bitmask(vcpu, context, true);
vcpu             5169 arch/x86/kvm/mmu.c 	update_pkru_bitmask(vcpu, context, true);
vcpu             5170 arch/x86/kvm/mmu.c 	update_last_nonleaf_level(vcpu, context);
vcpu             5171 arch/x86/kvm/mmu.c 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
vcpu             5172 arch/x86/kvm/mmu.c 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
vcpu             5176 arch/x86/kvm/mmu.c static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
vcpu             5178 arch/x86/kvm/mmu.c 	struct kvm_mmu *context = vcpu->arch.mmu;
vcpu             5180 arch/x86/kvm/mmu.c 	kvm_init_shadow_mmu(vcpu);
vcpu             5187 arch/x86/kvm/mmu.c static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
vcpu             5189 arch/x86/kvm/mmu.c 	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
vcpu             5190 arch/x86/kvm/mmu.c 	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
vcpu             5209 arch/x86/kvm/mmu.c 	if (!is_paging(vcpu)) {
vcpu             5213 arch/x86/kvm/mmu.c 	} else if (is_long_mode(vcpu)) {
vcpu             5214 arch/x86/kvm/mmu.c 		g_context->nx = is_nx(vcpu);
vcpu             5215 arch/x86/kvm/mmu.c 		g_context->root_level = is_la57_mode(vcpu) ?
vcpu             5217 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, g_context);
vcpu             5219 arch/x86/kvm/mmu.c 	} else if (is_pae(vcpu)) {
vcpu             5220 arch/x86/kvm/mmu.c 		g_context->nx = is_nx(vcpu);
vcpu             5222 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, g_context);
vcpu             5227 arch/x86/kvm/mmu.c 		reset_rsvds_bits_mask(vcpu, g_context);
vcpu             5231 arch/x86/kvm/mmu.c 	update_permission_bitmask(vcpu, g_context, false);
vcpu             5232 arch/x86/kvm/mmu.c 	update_pkru_bitmask(vcpu, g_context, false);
vcpu             5233 arch/x86/kvm/mmu.c 	update_last_nonleaf_level(vcpu, g_context);
vcpu             5236 arch/x86/kvm/mmu.c void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
vcpu             5241 arch/x86/kvm/mmu.c 		vcpu->arch.mmu->root_hpa = INVALID_PAGE;
vcpu             5244 arch/x86/kvm/mmu.c 			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu             5247 arch/x86/kvm/mmu.c 	if (mmu_is_nested(vcpu))
vcpu             5248 arch/x86/kvm/mmu.c 		init_kvm_nested_mmu(vcpu);
vcpu             5250 arch/x86/kvm/mmu.c 		init_kvm_tdp_mmu(vcpu);
vcpu             5252 arch/x86/kvm/mmu.c 		init_kvm_softmmu(vcpu);
vcpu             5257 arch/x86/kvm/mmu.c kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
vcpu             5262 arch/x86/kvm/mmu.c 		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
vcpu             5264 arch/x86/kvm/mmu.c 		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
vcpu             5269 arch/x86/kvm/mmu.c void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
vcpu             5271 arch/x86/kvm/mmu.c 	kvm_mmu_unload(vcpu);
vcpu             5272 arch/x86/kvm/mmu.c 	kvm_init_mmu(vcpu, true);
vcpu             5276 arch/x86/kvm/mmu.c int kvm_mmu_load(struct kvm_vcpu *vcpu)
vcpu             5280 arch/x86/kvm/mmu.c 	r = mmu_topup_memory_caches(vcpu);
vcpu             5283 arch/x86/kvm/mmu.c 	r = mmu_alloc_roots(vcpu);
vcpu             5284 arch/x86/kvm/mmu.c 	kvm_mmu_sync_roots(vcpu);
vcpu             5287 arch/x86/kvm/mmu.c 	kvm_mmu_load_cr3(vcpu);
vcpu             5288 arch/x86/kvm/mmu.c 	kvm_x86_ops->tlb_flush(vcpu, true);
vcpu             5294 arch/x86/kvm/mmu.c void kvm_mmu_unload(struct kvm_vcpu *vcpu)
vcpu             5296 arch/x86/kvm/mmu.c 	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
vcpu             5297 arch/x86/kvm/mmu.c 	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
vcpu             5298 arch/x86/kvm/mmu.c 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
vcpu             5299 arch/x86/kvm/mmu.c 	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
vcpu             5303 arch/x86/kvm/mmu.c static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
vcpu             5308 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.mmu_pde_zapped;
vcpu             5312 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_pte_updated;
vcpu             5313 arch/x86/kvm/mmu.c 	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
vcpu             5329 arch/x86/kvm/mmu.c static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
vcpu             5340 arch/x86/kvm/mmu.c 	if (is_pae(vcpu) && *bytes == 4) {
vcpu             5347 arch/x86/kvm/mmu.c 		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
vcpu             5431 arch/x86/kvm/mmu.c static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5446 arch/x86/kvm/mmu.c 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
vcpu             5458 arch/x86/kvm/mmu.c 	mmu_topup_memory_caches(vcpu);
vcpu             5460 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             5462 arch/x86/kvm/mmu.c 	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
vcpu             5464 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_pte_write;
vcpu             5465 arch/x86/kvm/mmu.c 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
vcpu             5467 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
vcpu             5470 arch/x86/kvm/mmu.c 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
vcpu             5471 arch/x86/kvm/mmu.c 			++vcpu->kvm->stat.mmu_flooded;
vcpu             5481 arch/x86/kvm/mmu.c 			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
vcpu             5484 arch/x86/kvm/mmu.c 			mmu_page_zap_pte(vcpu->kvm, sp, spte);
vcpu             5487 arch/x86/kvm/mmu.c 			      & mmu_base_role_mask.word) && rmap_can_add(vcpu))
vcpu             5488 arch/x86/kvm/mmu.c 				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
vcpu             5494 arch/x86/kvm/mmu.c 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
vcpu             5495 arch/x86/kvm/mmu.c 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
vcpu             5496 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             5499 arch/x86/kvm/mmu.c int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
vcpu             5504 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map)
vcpu             5507 arch/x86/kvm/mmu.c 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
vcpu             5509 arch/x86/kvm/mmu.c 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
vcpu             5515 arch/x86/kvm/mmu.c static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
vcpu             5519 arch/x86/kvm/mmu.c 	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
vcpu             5522 arch/x86/kvm/mmu.c 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
vcpu             5523 arch/x86/kvm/mmu.c 		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
vcpu             5526 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.mmu_recycled;
vcpu             5528 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
vcpu             5530 arch/x86/kvm/mmu.c 	if (!kvm_mmu_available_pages(vcpu->kvm))
vcpu             5535 arch/x86/kvm/mmu.c int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
vcpu             5539 arch/x86/kvm/mmu.c 	bool direct = vcpu->arch.mmu->direct_map;
vcpu             5542 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map) {
vcpu             5543 arch/x86/kvm/mmu.c 		vcpu->arch.gpa_available = true;
vcpu             5544 arch/x86/kvm/mmu.c 		vcpu->arch.gpa_val = cr2_or_gpa;
vcpu             5549 arch/x86/kvm/mmu.c 		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
vcpu             5555 arch/x86/kvm/mmu.c 		r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
vcpu             5573 arch/x86/kvm/mmu.c 	if (vcpu->arch.mmu->direct_map &&
vcpu             5575 arch/x86/kvm/mmu.c 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
vcpu             5590 arch/x86/kvm/mmu.c 	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
vcpu             5601 arch/x86/kvm/mmu.c 		if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
vcpu             5605 arch/x86/kvm/mmu.c 	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
vcpu             5610 arch/x86/kvm/mmu.c void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
vcpu             5612 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
vcpu             5616 arch/x86/kvm/mmu.c 	if (is_noncanonical_address(gva, vcpu))
vcpu             5619 arch/x86/kvm/mmu.c 	mmu->invlpg(vcpu, gva, mmu->root_hpa);
vcpu             5634 arch/x86/kvm/mmu.c 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
vcpu             5636 arch/x86/kvm/mmu.c 	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
vcpu             5637 arch/x86/kvm/mmu.c 	++vcpu->stat.invlpg;
vcpu             5641 arch/x86/kvm/mmu.c void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
vcpu             5643 arch/x86/kvm/mmu.c 	struct kvm_mmu *mmu = vcpu->arch.mmu;
vcpu             5647 arch/x86/kvm/mmu.c 	if (pcid == kvm_get_active_pcid(vcpu)) {
vcpu             5648 arch/x86/kvm/mmu.c 		mmu->invlpg(vcpu, gva, mmu->root_hpa);
vcpu             5654 arch/x86/kvm/mmu.c 		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
vcpu             5655 arch/x86/kvm/mmu.c 			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
vcpu             5661 arch/x86/kvm/mmu.c 		kvm_x86_ops->tlb_flush_gva(vcpu, gva);
vcpu             5663 arch/x86/kvm/mmu.c 	++vcpu->stat.invlpg;
vcpu             5764 arch/x86/kvm/mmu.c static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
vcpu             5778 arch/x86/kvm/mmu.c 	if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
vcpu             5792 arch/x86/kvm/mmu.c int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu             5797 arch/x86/kvm/mmu.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu             5798 arch/x86/kvm/mmu.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu             5800 arch/x86/kvm/mmu.c 	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
vcpu             5801 arch/x86/kvm/mmu.c 	vcpu->arch.root_mmu.root_cr3 = 0;
vcpu             5802 arch/x86/kvm/mmu.c 	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
vcpu             5804 arch/x86/kvm/mmu.c 		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu             5806 arch/x86/kvm/mmu.c 	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
vcpu             5807 arch/x86/kvm/mmu.c 	vcpu->arch.guest_mmu.root_cr3 = 0;
vcpu             5808 arch/x86/kvm/mmu.c 	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
vcpu             5810 arch/x86/kvm/mmu.c 		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu             5812 arch/x86/kvm/mmu.c 	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
vcpu             5814 arch/x86/kvm/mmu.c 	ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
vcpu             5818 arch/x86/kvm/mmu.c 	ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
vcpu             5824 arch/x86/kvm/mmu.c 	free_mmu_pages(&vcpu->arch.guest_mmu);
vcpu             6382 arch/x86/kvm/mmu.c void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
vcpu             6384 arch/x86/kvm/mmu.c 	kvm_mmu_unload(vcpu);
vcpu             6385 arch/x86/kvm/mmu.c 	free_mmu_pages(&vcpu->arch.root_mmu);
vcpu             6386 arch/x86/kvm/mmu.c 	free_mmu_pages(&vcpu->arch.guest_mmu);
vcpu             6387 arch/x86/kvm/mmu.c 	mmu_free_memory_caches(vcpu);
vcpu               57 arch/x86/kvm/mmu.h reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
vcpu               59 arch/x86/kvm/mmu.h void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
vcpu               60 arch/x86/kvm/mmu.h void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
vcpu               61 arch/x86/kvm/mmu.h void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
vcpu               63 arch/x86/kvm/mmu.h bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
vcpu               64 arch/x86/kvm/mmu.h int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
vcpu               76 arch/x86/kvm/mmu.h static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
vcpu               78 arch/x86/kvm/mmu.h 	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
vcpu               81 arch/x86/kvm/mmu.h 	return kvm_mmu_load(vcpu);
vcpu               84 arch/x86/kvm/mmu.h static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
vcpu               88 arch/x86/kvm/mmu.h 	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
vcpu               93 arch/x86/kvm/mmu.h static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
vcpu               95 arch/x86/kvm/mmu.h 	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
vcpu               98 arch/x86/kvm/mmu.h static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
vcpu              100 arch/x86/kvm/mmu.h 	if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu              101 arch/x86/kvm/mmu.h 		vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
vcpu              102 arch/x86/kvm/mmu.h 					      kvm_get_active_pcid(vcpu));
vcpu              143 arch/x86/kvm/mmu.h static inline bool is_write_protection(struct kvm_vcpu *vcpu)
vcpu              145 arch/x86/kvm/mmu.h 	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
vcpu              156 arch/x86/kvm/mmu.h static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu              160 arch/x86/kvm/mmu.h 	int cpl = kvm_x86_ops->get_cpl(vcpu);
vcpu              161 arch/x86/kvm/mmu.h 	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
vcpu              192 arch/x86/kvm/mmu.h 		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
vcpu              212 arch/x86/kvm/mmu.h int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
vcpu               32 arch/x86/kvm/mmu_audit.c typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
vcpu               34 arch/x86/kvm/mmu_audit.c static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu               42 arch/x86/kvm/mmu_audit.c 		fn(vcpu, ent + i, level);
vcpu               49 arch/x86/kvm/mmu_audit.c 			__mmu_spte_walk(vcpu, child, fn, level - 1);
vcpu               54 arch/x86/kvm/mmu_audit.c static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
vcpu               59 arch/x86/kvm/mmu_audit.c 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu               62 arch/x86/kvm/mmu_audit.c 	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
vcpu               63 arch/x86/kvm/mmu_audit.c 		hpa_t root = vcpu->arch.mmu->root_hpa;
vcpu               66 arch/x86/kvm/mmu_audit.c 		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
vcpu               71 arch/x86/kvm/mmu_audit.c 		hpa_t root = vcpu->arch.mmu->pae_root[i];
vcpu               76 arch/x86/kvm/mmu_audit.c 			__mmu_spte_walk(vcpu, sp, fn, 2);
vcpu               93 arch/x86/kvm/mmu_audit.c static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
vcpu              104 arch/x86/kvm/mmu_audit.c 			audit_printk(vcpu->kvm, "unsync sp: %p "
vcpu              114 arch/x86/kvm/mmu_audit.c 	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);
vcpu              121 arch/x86/kvm/mmu_audit.c 		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
vcpu              122 arch/x86/kvm/mmu_audit.c 			     "ent %llxn", vcpu->arch.mmu->root_level, pfn,
vcpu              160 arch/x86/kvm/mmu_audit.c static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
vcpu              163 arch/x86/kvm/mmu_audit.c 		inspect_spte_has_rmap(vcpu->kvm, sptep);
vcpu              166 arch/x86/kvm/mmu_audit.c static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
vcpu              170 arch/x86/kvm/mmu_audit.c 	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
vcpu              171 arch/x86/kvm/mmu_audit.c 		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
vcpu              224 arch/x86/kvm/mmu_audit.c static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
vcpu              226 arch/x86/kvm/mmu_audit.c 	audit_sptes_have_rmaps(vcpu, sptep, level);
vcpu              227 arch/x86/kvm/mmu_audit.c 	audit_mappings(vcpu, sptep, level);
vcpu              228 arch/x86/kvm/mmu_audit.c 	audit_spte_after_sync(vcpu, sptep, level);
vcpu              231 arch/x86/kvm/mmu_audit.c static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
vcpu              233 arch/x86/kvm/mmu_audit.c 	mmu_spte_walk(vcpu, audit_spte);
vcpu              239 arch/x86/kvm/mmu_audit.c static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
vcpu              246 arch/x86/kvm/mmu_audit.c 	vcpu->kvm->arch.audit_point = point;
vcpu              247 arch/x86/kvm/mmu_audit.c 	audit_all_active_sps(vcpu->kvm);
vcpu              248 arch/x86/kvm/mmu_audit.c 	audit_vcpu_spte(vcpu);
vcpu              251 arch/x86/kvm/mmu_audit.c static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
vcpu              254 arch/x86/kvm/mmu_audit.c 		__kvm_mmu_audit(vcpu, point);
vcpu              252 arch/x86/kvm/mmutrace.h 	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
vcpu              254 arch/x86/kvm/mmutrace.h 	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, retry),
vcpu              267 arch/x86/kvm/mmutrace.h 		__entry->vcpu_id = vcpu->vcpu_id;
vcpu               54 arch/x86/kvm/mtrr.c bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
vcpu               78 arch/x86/kvm/mtrr.c 	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
vcpu               88 arch/x86/kvm/mtrr.c 		kvm_inject_gp(vcpu, 0);
vcpu              111 arch/x86/kvm/mtrr.c static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
vcpu              123 arch/x86/kvm/mtrr.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
vcpu              310 arch/x86/kvm/mtrr.c static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
vcpu              312 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
vcpu              317 arch/x86/kvm/mtrr.c 	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
vcpu              336 arch/x86/kvm/mtrr.c 	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
vcpu              344 arch/x86/kvm/mtrr.c static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
vcpu              346 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
vcpu              365 arch/x86/kvm/mtrr.c 		cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
vcpu              376 arch/x86/kvm/mtrr.c int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
vcpu              380 arch/x86/kvm/mtrr.c 	if (!kvm_mtrr_valid(vcpu, msr, data))
vcpu              385 arch/x86/kvm/mtrr.c 		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
vcpu              387 arch/x86/kvm/mtrr.c 		vcpu->arch.mtrr_state.deftype = data;
vcpu              389 arch/x86/kvm/mtrr.c 		vcpu->arch.pat = data;
vcpu              391 arch/x86/kvm/mtrr.c 		set_var_mtrr_msr(vcpu, msr, data);
vcpu              393 arch/x86/kvm/mtrr.c 	update_mtrr(vcpu, msr);
vcpu              397 arch/x86/kvm/mtrr.c int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
vcpu              418 arch/x86/kvm/mtrr.c 		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
vcpu              420 arch/x86/kvm/mtrr.c 		*pdata = vcpu->arch.mtrr_state.deftype;
vcpu              422 arch/x86/kvm/mtrr.c 		*pdata = vcpu->arch.pat;
vcpu              429 arch/x86/kvm/mtrr.c 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
vcpu              431 arch/x86/kvm/mtrr.c 			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
vcpu              433 arch/x86/kvm/mtrr.c 		*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
vcpu              439 arch/x86/kvm/mtrr.c void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
vcpu              441 arch/x86/kvm/mtrr.c 	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
vcpu              619 arch/x86/kvm/mtrr.c u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu              621 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
vcpu              679 arch/x86/kvm/mtrr.c 		return mtrr_disabled_type(vcpu);
vcpu              695 arch/x86/kvm/mtrr.c bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu              698 arch/x86/kvm/mtrr.c 	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
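
The mtrr.c entries above (kvm_mtrr_valid, set_var_mtrr_msr, kvm_mtrr_get_msr) all clamp variable-range MTRR MSR values against cpuid_maxphyaddr(vcpu), using expressions such as (~0ULL) << cpuid_maxphyaddr(vcpu) and (1ULL << cpuid_maxphyaddr(vcpu)) - 1. The standalone sketch below is not KVM code; it only illustrates, with a hypothetical maxphyaddr value and hypothetical helper names, how those expressions split an MSR value into reserved high bits (checked on write) and usable address bits (kept on read).

/*
 * Illustrative, standalone sketch (not KVM code): how a guest's physical
 * address width is used to validate and clamp a variable MTRR mask MSR,
 * mirroring the expressions seen in arch/x86/kvm/mtrr.c above.
 * "maxphyaddr" is a hypothetical stand-in for cpuid_maxphyaddr(vcpu).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bits at or above maxphyaddr are reserved and must be zero on write. */
static bool mtrr_mask_msr_valid(uint64_t data, unsigned int maxphyaddr)
{
	uint64_t reserved = ~0ULL << maxphyaddr;

	return (data & reserved) == 0;
}

/* On read, only bits below maxphyaddr are architecturally meaningful. */
static uint64_t mtrr_mask_msr_read(uint64_t stored, unsigned int maxphyaddr)
{
	return stored & ((1ULL << maxphyaddr) - 1);
}

int main(void)
{
	unsigned int maxphyaddr = 48;          /* hypothetical guest width */
	uint64_t data = 0x0000fffff0000800ULL; /* hypothetical PhysMask-style value, valid bit (bit 11) set */

	printf("valid: %d\n", mtrr_mask_msr_valid(data, maxphyaddr));
	printf("read : %#llx\n",
	       (unsigned long long)mtrr_mask_msr_read(data, maxphyaddr));
	return 0;
}
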
vcpu              145 arch/x86/kvm/page_track.c bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu              154 arch/x86/kvm/page_track.c 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu              223 arch/x86/kvm/page_track.c void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
vcpu              230 arch/x86/kvm/page_track.c 	head = &vcpu->kvm->arch.track_notifier_head;
vcpu              238 arch/x86/kvm/page_track.c 			n->track_write(vcpu, gpa, new, bytes, n);
vcpu              131 arch/x86/kvm/paging_tmpl.h static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu              174 arch/x86/kvm/paging_tmpl.h static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
vcpu              178 arch/x86/kvm/paging_tmpl.h 	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
vcpu              185 arch/x86/kvm/paging_tmpl.h 	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
vcpu              192 arch/x86/kvm/paging_tmpl.h 	drop_spte(vcpu->kvm, spte);
vcpu              220 arch/x86/kvm/paging_tmpl.h static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
vcpu              248 arch/x86/kvm/paging_tmpl.h 			if (kvm_arch_write_log_dirty(vcpu))
vcpu              272 arch/x86/kvm/paging_tmpl.h 		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
vcpu              276 arch/x86/kvm/paging_tmpl.h 		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
vcpu              282 arch/x86/kvm/paging_tmpl.h static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
vcpu              297 arch/x86/kvm/paging_tmpl.h 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu              321 arch/x86/kvm/paging_tmpl.h 	pte           = mmu->get_cr3(vcpu);
vcpu              327 arch/x86/kvm/paging_tmpl.h 		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
vcpu              335 arch/x86/kvm/paging_tmpl.h 	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
vcpu              363 arch/x86/kvm/paging_tmpl.h 		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
vcpu              382 arch/x86/kvm/paging_tmpl.h 		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
vcpu              411 arch/x86/kvm/paging_tmpl.h 	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
vcpu              417 arch/x86/kvm/paging_tmpl.h 	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
vcpu              427 arch/x86/kvm/paging_tmpl.h 	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
vcpu              445 arch/x86/kvm/paging_tmpl.h 		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
vcpu              459 arch/x86/kvm/paging_tmpl.h 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
vcpu              481 arch/x86/kvm/paging_tmpl.h 		vcpu->arch.exit_qualification &= 0x180;
vcpu              483 arch/x86/kvm/paging_tmpl.h 			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
vcpu              485 arch/x86/kvm/paging_tmpl.h 			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
vcpu              487 arch/x86/kvm/paging_tmpl.h 			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
vcpu              488 arch/x86/kvm/paging_tmpl.h 		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
vcpu              492 arch/x86/kvm/paging_tmpl.h 	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
vcpu              499 arch/x86/kvm/paging_tmpl.h 			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
vcpu              501 arch/x86/kvm/paging_tmpl.h 	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
vcpu              507 arch/x86/kvm/paging_tmpl.h 				   struct kvm_vcpu *vcpu, gva_t addr,
vcpu              510 arch/x86/kvm/paging_tmpl.h 	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
vcpu              516 arch/x86/kvm/paging_tmpl.h FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu              523 arch/x86/kvm/paging_tmpl.h 	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
vcpu              530 arch/x86/kvm/paging_tmpl.h 	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
vcpu              531 arch/x86/kvm/paging_tmpl.h 	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
vcpu              540 arch/x86/kvm/paging_tmpl.h 	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
vcpu              547 arch/x86/kvm/paging_tmpl.h static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
vcpu              552 arch/x86/kvm/paging_tmpl.h 	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
vcpu              555 arch/x86/kvm/paging_tmpl.h static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
vcpu              568 arch/x86/kvm/paging_tmpl.h 		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
vcpu              572 arch/x86/kvm/paging_tmpl.h 		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
vcpu              578 arch/x86/kvm/paging_tmpl.h static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
vcpu              592 arch/x86/kvm/paging_tmpl.h 		return __direct_pte_prefetch(vcpu, sp, sptep);
vcpu              604 arch/x86/kvm/paging_tmpl.h 		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
vcpu              614 arch/x86/kvm/paging_tmpl.h static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              628 arch/x86/kvm/paging_tmpl.h 	top_level = vcpu->arch.mmu->root_level;
vcpu              637 arch/x86/kvm/paging_tmpl.h 	if (FNAME(gpte_changed)(vcpu, gw, top_level))
vcpu              640 arch/x86/kvm/paging_tmpl.h 	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu              643 arch/x86/kvm/paging_tmpl.h 	for (shadow_walk_init(&it, vcpu, addr);
vcpu              649 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
vcpu              654 arch/x86/kvm/paging_tmpl.h 			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
vcpu              662 arch/x86/kvm/paging_tmpl.h 		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
vcpu              666 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
vcpu              691 arch/x86/kvm/paging_tmpl.h 		validate_direct_spte(vcpu, it.sptep, direct_access);
vcpu              693 arch/x86/kvm/paging_tmpl.h 		drop_large_spte(vcpu, it.sptep);
vcpu              696 arch/x86/kvm/paging_tmpl.h 			sp = kvm_mmu_get_page(vcpu, base_gfn, addr,
vcpu              698 arch/x86/kvm/paging_tmpl.h 			link_shadow_page(vcpu, it.sptep, sp);
vcpu              700 arch/x86/kvm/paging_tmpl.h 				account_huge_nx_page(vcpu->kvm, sp);
vcpu              704 arch/x86/kvm/paging_tmpl.h 	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
vcpu              706 arch/x86/kvm/paging_tmpl.h 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
vcpu              707 arch/x86/kvm/paging_tmpl.h 	++vcpu->stat.pf_fixed;
vcpu              732 arch/x86/kvm/paging_tmpl.h FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
vcpu              741 arch/x86/kvm/paging_tmpl.h 	      (!is_write_protection(vcpu) && !user_fault)))
vcpu              768 arch/x86/kvm/paging_tmpl.h static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
vcpu              785 arch/x86/kvm/paging_tmpl.h 	r = mmu_topup_memory_caches(vcpu);
vcpu              798 arch/x86/kvm/paging_tmpl.h 	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
vcpu              806 arch/x86/kvm/paging_tmpl.h 			inject_page_fault(vcpu, &walker.fault);
vcpu              811 arch/x86/kvm/paging_tmpl.h 	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
vcpu              812 arch/x86/kvm/paging_tmpl.h 		shadow_page_table_clear_flood(vcpu, addr);
vcpu              816 arch/x86/kvm/paging_tmpl.h 	vcpu->arch.write_fault_to_shadow_pgtable = false;
vcpu              818 arch/x86/kvm/paging_tmpl.h 	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
vcpu              819 arch/x86/kvm/paging_tmpl.h 	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
vcpu              822 arch/x86/kvm/paging_tmpl.h 		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
vcpu              830 arch/x86/kvm/paging_tmpl.h 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
vcpu              833 arch/x86/kvm/paging_tmpl.h 	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
vcpu              837 arch/x86/kvm/paging_tmpl.h 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
vcpu              845 arch/x86/kvm/paging_tmpl.h 	     !is_write_protection(vcpu) && !user_fault &&
vcpu              856 arch/x86/kvm/paging_tmpl.h 		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
vcpu              861 arch/x86/kvm/paging_tmpl.h 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu              862 arch/x86/kvm/paging_tmpl.h 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
vcpu              865 arch/x86/kvm/paging_tmpl.h 	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
vcpu              866 arch/x86/kvm/paging_tmpl.h 	if (make_mmu_pages_available(vcpu) < 0)
vcpu              869 arch/x86/kvm/paging_tmpl.h 		transparent_hugepage_adjust(vcpu, walker.gfn, &pfn, &level);
vcpu              870 arch/x86/kvm/paging_tmpl.h 	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
vcpu              872 arch/x86/kvm/paging_tmpl.h 	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
vcpu              875 arch/x86/kvm/paging_tmpl.h 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu              892 arch/x86/kvm/paging_tmpl.h static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
vcpu              899 arch/x86/kvm/paging_tmpl.h 	vcpu_clear_mmio_info(vcpu, gva);
vcpu              905 arch/x86/kvm/paging_tmpl.h 	mmu_topup_memory_caches(vcpu);
vcpu              912 arch/x86/kvm/paging_tmpl.h 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu              913 arch/x86/kvm/paging_tmpl.h 	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
vcpu              928 arch/x86/kvm/paging_tmpl.h 			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
vcpu              929 arch/x86/kvm/paging_tmpl.h 				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
vcpu              932 arch/x86/kvm/paging_tmpl.h 			if (!rmap_can_add(vcpu))
vcpu              935 arch/x86/kvm/paging_tmpl.h 			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
vcpu              939 arch/x86/kvm/paging_tmpl.h 			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
vcpu              945 arch/x86/kvm/paging_tmpl.h 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu              949 arch/x86/kvm/paging_tmpl.h static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
vcpu              956 arch/x86/kvm/paging_tmpl.h 	r = FNAME(walk_addr)(&walker, vcpu, addr, access);
vcpu              969 arch/x86/kvm/paging_tmpl.h static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
vcpu              982 arch/x86/kvm/paging_tmpl.h 	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
vcpu             1007 arch/x86/kvm/paging_tmpl.h static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
vcpu             1030 arch/x86/kvm/paging_tmpl.h 		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
vcpu             1034 arch/x86/kvm/paging_tmpl.h 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu             1041 arch/x86/kvm/paging_tmpl.h 			vcpu->kvm->tlbs_dirty++;
vcpu             1048 arch/x86/kvm/paging_tmpl.h 		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
vcpu             1050 arch/x86/kvm/paging_tmpl.h 		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
vcpu             1055 arch/x86/kvm/paging_tmpl.h 			drop_spte(vcpu->kvm, &sp->spt[i]);
vcpu             1061 arch/x86/kvm/paging_tmpl.h 			vcpu->kvm->tlbs_dirty++;
vcpu             1069 arch/x86/kvm/paging_tmpl.h 		set_spte_ret |= set_spte(vcpu, &sp->spt[i],
vcpu             1076 arch/x86/kvm/paging_tmpl.h 		kvm_flush_remote_tlbs(vcpu->kvm);
vcpu               53 arch/x86/kvm/pmu.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
vcpu               55 arch/x86/kvm/pmu.c 	kvm_pmu_deliver_pmi(vcpu);
vcpu               68 arch/x86/kvm/pmu.c 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
vcpu               82 arch/x86/kvm/pmu.c 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
vcpu               95 arch/x86/kvm/pmu.c 			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
vcpu              147 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
vcpu              210 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
vcpu              253 arch/x86/kvm/pmu.c void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
vcpu              255 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              274 arch/x86/kvm/pmu.c int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
vcpu              276 arch/x86/kvm/pmu.c 	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
vcpu              290 arch/x86/kvm/pmu.c static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
vcpu              303 arch/x86/kvm/pmu.c 			vcpu->kvm->arch.kvmclock_offset;
vcpu              313 arch/x86/kvm/pmu.c int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
vcpu              316 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              324 arch/x86/kvm/pmu.c 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
vcpu              326 arch/x86/kvm/pmu.c 	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
vcpu              334 arch/x86/kvm/pmu.c void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
vcpu              336 arch/x86/kvm/pmu.c 	if (lapic_in_kernel(vcpu))
vcpu              337 arch/x86/kvm/pmu.c 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
vcpu              340 arch/x86/kvm/pmu.c bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
vcpu              342 arch/x86/kvm/pmu.c 	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
vcpu              345 arch/x86/kvm/pmu.c int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
vcpu              347 arch/x86/kvm/pmu.c 	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
vcpu              350 arch/x86/kvm/pmu.c int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu              352 arch/x86/kvm/pmu.c 	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
vcpu              359 arch/x86/kvm/pmu.c void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
vcpu              361 arch/x86/kvm/pmu.c 	kvm_x86_ops->pmu_ops->refresh(vcpu);
vcpu              364 arch/x86/kvm/pmu.c void kvm_pmu_reset(struct kvm_vcpu *vcpu)
vcpu              366 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              369 arch/x86/kvm/pmu.c 	kvm_x86_ops->pmu_ops->reset(vcpu);
vcpu              372 arch/x86/kvm/pmu.c void kvm_pmu_init(struct kvm_vcpu *vcpu)
vcpu              374 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              377 arch/x86/kvm/pmu.c 	kvm_x86_ops->pmu_ops->init(vcpu);
vcpu              379 arch/x86/kvm/pmu.c 	kvm_pmu_refresh(vcpu);
vcpu              382 arch/x86/kvm/pmu.c void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
vcpu              384 arch/x86/kvm/pmu.c 	kvm_pmu_reset(vcpu);
vcpu                7 arch/x86/kvm/pmu.h #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
vcpu                9 arch/x86/kvm/pmu.h #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
vcpu               30 arch/x86/kvm/pmu.h 	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
vcpu               32 arch/x86/kvm/pmu.h 	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
vcpu               33 arch/x86/kvm/pmu.h 	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
vcpu               34 arch/x86/kvm/pmu.h 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
vcpu               35 arch/x86/kvm/pmu.h 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
vcpu               36 arch/x86/kvm/pmu.h 	void (*refresh)(struct kvm_vcpu *vcpu);
vcpu               37 arch/x86/kvm/pmu.h 	void (*init)(struct kvm_vcpu *vcpu);
vcpu               38 arch/x86/kvm/pmu.h 	void (*reset)(struct kvm_vcpu *vcpu);
vcpu              120 arch/x86/kvm/pmu.h void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
vcpu              121 arch/x86/kvm/pmu.h void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
vcpu              122 arch/x86/kvm/pmu.h int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
vcpu              123 arch/x86/kvm/pmu.h int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
vcpu              124 arch/x86/kvm/pmu.h bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
vcpu              125 arch/x86/kvm/pmu.h int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
vcpu              126 arch/x86/kvm/pmu.h int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
vcpu              127 arch/x86/kvm/pmu.h void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
vcpu              128 arch/x86/kvm/pmu.h void kvm_pmu_reset(struct kvm_vcpu *vcpu);
vcpu              129 arch/x86/kvm/pmu.h void kvm_pmu_init(struct kvm_vcpu *vcpu);
vcpu              130 arch/x86/kvm/pmu.h void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
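
The pmu.c and pmu.h entries above show a dispatch pattern: kvm_pmu_get_msr(), kvm_pmu_set_msr(), kvm_pmu_refresh() and friends are thin wrappers that forward to the per-vendor function pointers in struct kvm_pmu_ops (kvm_x86_ops->pmu_ops->...), with the vendor callbacks themselves listed under pmu_amd.c below. The standalone C sketch that follows mirrors that shape only; all types and names in it are hypothetical simplifications, not the kernel's definitions.

/*
 * Illustrative, standalone sketch (not kernel code) of the ops-table
 * dispatch used by the kvm_pmu_* wrappers above: a struct of function
 * pointers filled in per vendor, and thin wrappers that forward to it.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t counters[4];
};

struct toy_pmu_ops {
	bool (*is_valid_msr)(struct toy_vcpu *vcpu, uint32_t msr);
	int  (*get_msr)(struct toy_vcpu *vcpu, uint32_t msr, uint64_t *data);
};

/* "Vendor" implementation, analogous in role to pmu_amd.c's amd_* callbacks. */
static bool toy_is_valid_msr(struct toy_vcpu *vcpu, uint32_t msr)
{
	(void)vcpu;
	return msr < 4;	/* pretend MSRs 0..3 select the four counters */
}

static int toy_get_msr(struct toy_vcpu *vcpu, uint32_t msr, uint64_t *data)
{
	if (!toy_is_valid_msr(vcpu, msr))
		return 1;
	*data = vcpu->counters[msr];
	return 0;
}

static const struct toy_pmu_ops toy_ops = {
	.is_valid_msr = toy_is_valid_msr,
	.get_msr      = toy_get_msr,
};

/* Wrappers in the style of kvm_pmu_is_valid_msr()/kvm_pmu_get_msr(). */
static bool toy_pmu_is_valid_msr(struct toy_vcpu *vcpu, uint32_t msr)
{
	return toy_ops.is_valid_msr(vcpu, msr);
}

static int toy_pmu_get_msr(struct toy_vcpu *vcpu, uint32_t msr, uint64_t *data)
{
	return toy_ops.get_msr(vcpu, msr, data);
}

int main(void)
{
	struct toy_vcpu vcpu = { .counters = { 11, 22, 33, 44 } };
	uint64_t val = 0;

	if (toy_pmu_is_valid_msr(&vcpu, 2) && !toy_pmu_get_msr(&vcpu, 2, &val))
		printf("counter 2 = %llu\n", (unsigned long long)val);
	return 0;
}
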
vcpu               49 arch/x86/kvm/pmu_amd.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
vcpu               51 arch/x86/kvm/pmu_amd.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
vcpu              163 arch/x86/kvm/pmu_amd.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
vcpu              165 arch/x86/kvm/pmu_amd.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
vcpu              177 arch/x86/kvm/pmu_amd.c static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
vcpu              179 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              187 arch/x86/kvm/pmu_amd.c static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *mask)
vcpu              189 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              200 arch/x86/kvm/pmu_amd.c static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
vcpu              202 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              211 arch/x86/kvm/pmu_amd.c static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
vcpu              213 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              232 arch/x86/kvm/pmu_amd.c static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu              234 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              259 arch/x86/kvm/pmu_amd.c static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
vcpu              261 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              263 arch/x86/kvm/pmu_amd.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
vcpu              277 arch/x86/kvm/pmu_amd.c static void amd_pmu_init(struct kvm_vcpu *vcpu)
vcpu              279 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              286 arch/x86/kvm/pmu_amd.c 		pmu->gp_counters[i].vcpu = vcpu;
vcpu              291 arch/x86/kvm/pmu_amd.c static void amd_pmu_reset(struct kvm_vcpu *vcpu)
vcpu              293 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              188 arch/x86/kvm/svm.c 	struct kvm_vcpu vcpu;
vcpu              386 arch/x86/kvm/svm.c static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
vcpu              387 arch/x86/kvm/svm.c static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
vcpu              479 arch/x86/kvm/svm.c static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
vcpu              481 arch/x86/kvm/svm.c 	return container_of(vcpu, struct vcpu_svm, vcpu);
vcpu              490 arch/x86/kvm/svm.c static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
vcpu              492 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              508 arch/x86/kvm/svm.c 	if (!is_guest_mode(&svm->vcpu))
vcpu              523 arch/x86/kvm/svm.c 	if (is_guest_mode(&svm->vcpu))
vcpu              633 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags |= HF_GIF_MASK;
vcpu              641 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
vcpu              649 arch/x86/kvm/svm.c 		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
vcpu              725 arch/x86/kvm/svm.c static int get_npt_level(struct kvm_vcpu *vcpu)
vcpu              734 arch/x86/kvm/svm.c static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu              736 arch/x86/kvm/svm.c 	vcpu->arch.efer = efer;
vcpu              746 arch/x86/kvm/svm.c 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
vcpu              747 arch/x86/kvm/svm.c 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
vcpu              756 arch/x86/kvm/svm.c static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
vcpu              758 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              766 arch/x86/kvm/svm.c static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
vcpu              768 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              777 arch/x86/kvm/svm.c static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
vcpu              779 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              787 arch/x86/kvm/svm.c 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
vcpu              790 arch/x86/kvm/svm.c 		if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
vcpu              792 arch/x86/kvm/svm.c 			       __func__, kvm_rip_read(vcpu), svm->next_rip);
vcpu              793 arch/x86/kvm/svm.c 		kvm_rip_write(vcpu, svm->next_rip);
vcpu              795 arch/x86/kvm/svm.c 	svm_set_interrupt_shadow(vcpu, 0);
vcpu              800 arch/x86/kvm/svm.c static void svm_queue_exception(struct kvm_vcpu *vcpu)
vcpu              802 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              803 arch/x86/kvm/svm.c 	unsigned nr = vcpu->arch.exception.nr;
vcpu              804 arch/x86/kvm/svm.c 	bool has_error_code = vcpu->arch.exception.has_error_code;
vcpu              805 arch/x86/kvm/svm.c 	bool reinject = vcpu->arch.exception.injected;
vcpu              806 arch/x86/kvm/svm.c 	u32 error_code = vcpu->arch.exception.error_code;
vcpu              816 arch/x86/kvm/svm.c 	kvm_deliver_exception_payload(&svm->vcpu);
vcpu              819 arch/x86/kvm/svm.c 		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
vcpu              828 arch/x86/kvm/svm.c 		(void)skip_emulated_instruction(&svm->vcpu);
vcpu              829 arch/x86/kvm/svm.c 		rip = kvm_rip_read(&svm->vcpu);
vcpu              865 arch/x86/kvm/svm.c static void svm_init_osvw(struct kvm_vcpu *vcpu)
vcpu              871 arch/x86/kvm/svm.c 	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
vcpu              872 arch/x86/kvm/svm.c 	vcpu->arch.osvw.status = osvw_status & ~(6ULL);
vcpu              883 arch/x86/kvm/svm.c 		vcpu->arch.osvw.status |= 1;
vcpu             1041 arch/x86/kvm/svm.c static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
vcpu             1048 arch/x86/kvm/svm.c 	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
vcpu             1049 arch/x86/kvm/svm.c 				      to_svm(vcpu)->msrpm;
vcpu             1169 arch/x86/kvm/svm.c 	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
vcpu             1197 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             1207 arch/x86/kvm/svm.c 		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
vcpu             1217 arch/x86/kvm/svm.c 	if (vcpu)
vcpu             1218 arch/x86/kvm/svm.c 		kvm_vcpu_wake_up(vcpu);
vcpu             1264 arch/x86/kvm/svm.c static void grow_ple_window(struct kvm_vcpu *vcpu)
vcpu             1266 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             1277 arch/x86/kvm/svm.c 		trace_kvm_ple_window_update(vcpu->vcpu_id,
vcpu             1282 arch/x86/kvm/svm.c static void shrink_ple_window(struct kvm_vcpu *vcpu)
vcpu             1284 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             1295 arch/x86/kvm/svm.c 		trace_kvm_ple_window_update(vcpu->vcpu_id,
vcpu             1490 arch/x86/kvm/svm.c static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
vcpu             1492 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             1494 arch/x86/kvm/svm.c 	if (is_guest_mode(vcpu))
vcpu             1497 arch/x86/kvm/svm.c 	return vcpu->arch.tsc_offset;
vcpu             1500 arch/x86/kvm/svm.c static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vcpu             1502 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             1505 arch/x86/kvm/svm.c 	if (is_guest_mode(vcpu)) {
vcpu             1512 arch/x86/kvm/svm.c 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
vcpu             1525 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
vcpu             1542 arch/x86/kvm/svm.c 	svm->vcpu.arch.hflags = 0;
vcpu             1550 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(&svm->vcpu))
vcpu             1594 arch/x86/kvm/svm.c 	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
vcpu             1599 arch/x86/kvm/svm.c 	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
vcpu             1625 arch/x86/kvm/svm.c 	svm_set_efer(&svm->vcpu, 0);
vcpu             1627 arch/x86/kvm/svm.c 	kvm_set_rflags(&svm->vcpu, 2);
vcpu             1629 arch/x86/kvm/svm.c 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
vcpu             1635 arch/x86/kvm/svm.c 	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
vcpu             1636 arch/x86/kvm/svm.c 	kvm_mmu_reset_context(&svm->vcpu);
vcpu             1648 arch/x86/kvm/svm.c 		save->g_pat = svm->vcpu.arch.pat;
vcpu             1655 arch/x86/kvm/svm.c 	svm->vcpu.arch.hflags = 0;
vcpu             1666 arch/x86/kvm/svm.c 	if (kvm_vcpu_apicv_active(&svm->vcpu))
vcpu             1685 arch/x86/kvm/svm.c 	if (sev_guest(svm->vcpu.kvm)) {
vcpu             1696 arch/x86/kvm/svm.c static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
vcpu             1700 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
vcpu             1718 arch/x86/kvm/svm.c static int avic_init_access_page(struct kvm_vcpu *vcpu)
vcpu             1720 arch/x86/kvm/svm.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1740 arch/x86/kvm/svm.c static int avic_init_backing_page(struct kvm_vcpu *vcpu)
vcpu             1744 arch/x86/kvm/svm.c 	int id = vcpu->vcpu_id;
vcpu             1745 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             1747 arch/x86/kvm/svm.c 	ret = avic_init_access_page(vcpu);
vcpu             1754 arch/x86/kvm/svm.c 	if (!svm->vcpu.arch.apic->regs)
vcpu             1757 arch/x86/kvm/svm.c 	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
vcpu             1760 arch/x86/kvm/svm.c 	entry = avic_get_physical_id_entry(vcpu, id);
vcpu             2047 arch/x86/kvm/svm.c avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
vcpu             2052 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2054 arch/x86/kvm/svm.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
vcpu             2076 arch/x86/kvm/svm.c static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2081 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2083 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(vcpu))
vcpu             2104 arch/x86/kvm/svm.c 	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
vcpu             2108 arch/x86/kvm/svm.c static void avic_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             2111 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2113 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(vcpu))
vcpu             2118 arch/x86/kvm/svm.c 		avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
vcpu             2127 arch/x86/kvm/svm.c static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
vcpu             2129 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2133 arch/x86/kvm/svm.c 		avic_vcpu_load(vcpu, vcpu->cpu);
vcpu             2135 arch/x86/kvm/svm.c 		avic_vcpu_put(vcpu);
vcpu             2138 arch/x86/kvm/svm.c static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu             2140 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2144 arch/x86/kvm/svm.c 	vcpu->arch.microcode_version = 0x01000065;
vcpu             2149 arch/x86/kvm/svm.c 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
vcpu             2151 arch/x86/kvm/svm.c 		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
vcpu             2152 arch/x86/kvm/svm.c 			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
vcpu             2156 arch/x86/kvm/svm.c 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
vcpu             2157 arch/x86/kvm/svm.c 	kvm_rdx_write(vcpu, eax);
vcpu             2159 arch/x86/kvm/svm.c 	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
vcpu             2167 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(&svm->vcpu))
vcpu             2170 arch/x86/kvm/svm.c 	ret = avic_init_backing_page(&svm->vcpu);
vcpu             2190 arch/x86/kvm/svm.c 	BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
vcpu             2199 arch/x86/kvm/svm.c 	svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
vcpu             2201 arch/x86/kvm/svm.c 	if (!svm->vcpu.arch.user_fpu) {
vcpu             2207 arch/x86/kvm/svm.c 	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
vcpu             2209 arch/x86/kvm/svm.c 	if (!svm->vcpu.arch.guest_fpu) {
vcpu             2215 arch/x86/kvm/svm.c 	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
vcpu             2259 arch/x86/kvm/svm.c 	svm_init_osvw(&svm->vcpu);
vcpu             2261 arch/x86/kvm/svm.c 	return &svm->vcpu;
vcpu             2272 arch/x86/kvm/svm.c 	kvm_vcpu_uninit(&svm->vcpu);
vcpu             2274 arch/x86/kvm/svm.c 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
vcpu             2276 arch/x86/kvm/svm.c 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
vcpu             2291 arch/x86/kvm/svm.c static void svm_free_vcpu(struct kvm_vcpu *vcpu)
vcpu             2293 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2306 arch/x86/kvm/svm.c 	kvm_vcpu_uninit(vcpu);
vcpu             2307 arch/x86/kvm/svm.c 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
vcpu             2308 arch/x86/kvm/svm.c 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
vcpu             2312 arch/x86/kvm/svm.c static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             2314 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2318 arch/x86/kvm/svm.c 	if (unlikely(cpu != vcpu->cpu)) {
vcpu             2324 arch/x86/kvm/svm.c 	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
vcpu             2334 arch/x86/kvm/svm.c 		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
vcpu             2348 arch/x86/kvm/svm.c 	avic_vcpu_load(vcpu, cpu);
vcpu             2351 arch/x86/kvm/svm.c static void svm_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             2353 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2356 arch/x86/kvm/svm.c 	avic_vcpu_put(vcpu);
vcpu             2358 arch/x86/kvm/svm.c 	++vcpu->stat.host_state_reload;
vcpu             2373 arch/x86/kvm/svm.c static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
vcpu             2375 arch/x86/kvm/svm.c 	avic_set_running(vcpu, false);
vcpu             2378 arch/x86/kvm/svm.c static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
vcpu             2380 arch/x86/kvm/svm.c 	avic_set_running(vcpu, true);
vcpu             2383 arch/x86/kvm/svm.c static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
vcpu             2385 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2398 arch/x86/kvm/svm.c static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vcpu             2400 arch/x86/kvm/svm.c 	if (to_svm(vcpu)->nmi_singlestep)
vcpu             2408 arch/x86/kvm/svm.c 	to_svm(vcpu)->vmcb->save.rflags = rflags;
vcpu             2411 arch/x86/kvm/svm.c static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
vcpu             2416 arch/x86/kvm/svm.c 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
vcpu             2433 arch/x86/kvm/svm.c static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
vcpu             2435 arch/x86/kvm/svm.c 	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
vcpu             2451 arch/x86/kvm/svm.c static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
vcpu             2453 arch/x86/kvm/svm.c 	struct vmcb_seg *s = svm_seg(vcpu, seg);
vcpu             2458 arch/x86/kvm/svm.c static void svm_get_segment(struct kvm_vcpu *vcpu,
vcpu             2461 arch/x86/kvm/svm.c 	struct vmcb_seg *s = svm_seg(vcpu, seg);
vcpu             2522 arch/x86/kvm/svm.c 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
vcpu             2527 arch/x86/kvm/svm.c static int svm_get_cpl(struct kvm_vcpu *vcpu)
vcpu             2529 arch/x86/kvm/svm.c 	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
vcpu             2534 arch/x86/kvm/svm.c static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             2536 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2542 arch/x86/kvm/svm.c static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             2544 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2551 arch/x86/kvm/svm.c static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             2553 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2559 arch/x86/kvm/svm.c static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             2561 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2568 arch/x86/kvm/svm.c static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
vcpu             2572 arch/x86/kvm/svm.c static void svm_decache_cr3(struct kvm_vcpu *vcpu)
vcpu             2576 arch/x86/kvm/svm.c static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
vcpu             2582 arch/x86/kvm/svm.c 	ulong gcr0 = svm->vcpu.arch.cr0;
vcpu             2599 arch/x86/kvm/svm.c static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu             2601 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2604 arch/x86/kvm/svm.c 	if (vcpu->arch.efer & EFER_LME) {
vcpu             2605 arch/x86/kvm/svm.c 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
vcpu             2606 arch/x86/kvm/svm.c 			vcpu->arch.efer |= EFER_LMA;
vcpu             2610 arch/x86/kvm/svm.c 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
vcpu             2611 arch/x86/kvm/svm.c 			vcpu->arch.efer &= ~EFER_LMA;
vcpu             2616 arch/x86/kvm/svm.c 	vcpu->arch.cr0 = cr0;
vcpu             2626 arch/x86/kvm/svm.c 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
vcpu             2633 arch/x86/kvm/svm.c static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu             2636 arch/x86/kvm/svm.c 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
vcpu             2642 arch/x86/kvm/svm.c 		svm_flush_tlb(vcpu, true);
vcpu             2644 arch/x86/kvm/svm.c 	vcpu->arch.cr4 = cr4;
vcpu             2648 arch/x86/kvm/svm.c 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
vcpu             2649 arch/x86/kvm/svm.c 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
vcpu             2653 arch/x86/kvm/svm.c static void svm_set_segment(struct kvm_vcpu *vcpu,
vcpu             2656 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2657 arch/x86/kvm/svm.c 	struct vmcb_seg *s = svm_seg(vcpu, seg);
vcpu             2684 arch/x86/kvm/svm.c static void update_bp_intercept(struct kvm_vcpu *vcpu)
vcpu             2686 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2690 arch/x86/kvm/svm.c 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
vcpu             2691 arch/x86/kvm/svm.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
vcpu             2694 arch/x86/kvm/svm.c 		vcpu->guest_debug = 0;
vcpu             2711 arch/x86/kvm/svm.c static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
vcpu             2713 arch/x86/kvm/svm.c 	return to_svm(vcpu)->vmcb->save.dr6;
vcpu             2716 arch/x86/kvm/svm.c static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
vcpu             2718 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2724 arch/x86/kvm/svm.c static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
vcpu             2726 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2728 arch/x86/kvm/svm.c 	get_debugreg(vcpu->arch.db[0], 0);
vcpu             2729 arch/x86/kvm/svm.c 	get_debugreg(vcpu->arch.db[1], 1);
vcpu             2730 arch/x86/kvm/svm.c 	get_debugreg(vcpu->arch.db[2], 2);
vcpu             2731 arch/x86/kvm/svm.c 	get_debugreg(vcpu->arch.db[3], 3);
vcpu             2732 arch/x86/kvm/svm.c 	vcpu->arch.dr6 = svm_get_dr6(vcpu);
vcpu             2733 arch/x86/kvm/svm.c 	vcpu->arch.dr7 = svm->vmcb->save.dr7;
vcpu             2735 arch/x86/kvm/svm.c 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
vcpu             2739 arch/x86/kvm/svm.c static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
vcpu             2741 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2752 arch/x86/kvm/svm.c 	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
vcpu             2764 arch/x86/kvm/svm.c 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
vcpu             2772 arch/x86/kvm/svm.c 	struct kvm_run *kvm_run = svm->vcpu.run;
vcpu             2773 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = &svm->vcpu;
vcpu             2775 arch/x86/kvm/svm.c 	if (!(svm->vcpu.guest_debug &
vcpu             2778 arch/x86/kvm/svm.c 		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
vcpu             2785 arch/x86/kvm/svm.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             2788 arch/x86/kvm/svm.c 	if (svm->vcpu.guest_debug &
vcpu             2802 arch/x86/kvm/svm.c 	struct kvm_run *kvm_run = svm->vcpu.run;
vcpu             2812 arch/x86/kvm/svm.c 	return handle_ud(&svm->vcpu);
vcpu             2817 arch/x86/kvm/svm.c 	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
vcpu             2823 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = &svm->vcpu;
vcpu             2833 arch/x86/kvm/svm.c 		kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
vcpu             2836 arch/x86/kvm/svm.c 	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
vcpu             2887 arch/x86/kvm/svm.c 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
vcpu             2910 arch/x86/kvm/svm.c 	struct kvm_run *kvm_run = svm->vcpu.run;
vcpu             2925 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = &svm->vcpu;
vcpu             2930 arch/x86/kvm/svm.c 	++svm->vcpu.stat.io_exits;
vcpu             2934 arch/x86/kvm/svm.c 		return kvm_emulate_instruction(vcpu, 0);
vcpu             2940 arch/x86/kvm/svm.c 	return kvm_fast_pio(&svm->vcpu, size, port, in);
vcpu             2950 arch/x86/kvm/svm.c 	++svm->vcpu.stat.irq_exits;
vcpu             2961 arch/x86/kvm/svm.c 	return kvm_emulate_halt(&svm->vcpu);
vcpu             2966 arch/x86/kvm/svm.c 	return kvm_emulate_hypercall(&svm->vcpu);
vcpu             2969 arch/x86/kvm/svm.c static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
vcpu             2971 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2976 arch/x86/kvm/svm.c static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
vcpu             2978 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2983 arch/x86/kvm/svm.c 	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
vcpu             2990 arch/x86/kvm/svm.c static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
vcpu             2993 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             2999 arch/x86/kvm/svm.c static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
vcpu             3002 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             3028 arch/x86/kvm/svm.c static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu             3030 arch/x86/kvm/svm.c 	WARN_ON(mmu_is_nested(vcpu));
vcpu             3032 arch/x86/kvm/svm.c 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
vcpu             3033 arch/x86/kvm/svm.c 	kvm_init_shadow_mmu(vcpu);
vcpu             3034 arch/x86/kvm/svm.c 	vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
vcpu             3035 arch/x86/kvm/svm.c 	vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
vcpu             3036 arch/x86/kvm/svm.c 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
vcpu             3037 arch/x86/kvm/svm.c 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
vcpu             3038 arch/x86/kvm/svm.c 	vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
vcpu             3039 arch/x86/kvm/svm.c 	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu             3040 arch/x86/kvm/svm.c 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
vcpu             3043 arch/x86/kvm/svm.c static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
vcpu             3045 arch/x86/kvm/svm.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu             3046 arch/x86/kvm/svm.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu             3051 arch/x86/kvm/svm.c 	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
vcpu             3052 arch/x86/kvm/svm.c 	    !is_paging(&svm->vcpu)) {
vcpu             3053 arch/x86/kvm/svm.c 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
vcpu             3058 arch/x86/kvm/svm.c 		kvm_inject_gp(&svm->vcpu, 0);
vcpu             3070 arch/x86/kvm/svm.c 	if (!is_guest_mode(&svm->vcpu))
vcpu             3085 arch/x86/kvm/svm.c 	if (svm->vcpu.arch.exception.nested_apf)
vcpu             3086 arch/x86/kvm/svm.c 		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
vcpu             3087 arch/x86/kvm/svm.c 	else if (svm->vcpu.arch.exception.has_payload)
vcpu             3088 arch/x86/kvm/svm.c 		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
vcpu             3090 arch/x86/kvm/svm.c 		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
vcpu             3099 arch/x86/kvm/svm.c 	if (!is_guest_mode(&svm->vcpu))
vcpu             3102 arch/x86/kvm/svm.c 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
vcpu             3105 arch/x86/kvm/svm.c 	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
vcpu             3138 arch/x86/kvm/svm.c 	if (!is_guest_mode(&svm->vcpu))
vcpu             3169 arch/x86/kvm/svm.c 	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
vcpu             3183 arch/x86/kvm/svm.c 	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
vcpu             3194 arch/x86/kvm/svm.c 	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
vcpu             3210 arch/x86/kvm/svm.c 	if (kvm_get_dr(&svm->vcpu, 6, &dr6))
vcpu             3241 arch/x86/kvm/svm.c 		if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason)
vcpu             3288 arch/x86/kvm/svm.c 			 svm->vcpu.arch.exception.nested_apf != 0)
vcpu             3365 arch/x86/kvm/svm.c 	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
vcpu             3368 arch/x86/kvm/svm.c 			kvm_inject_gp(&svm->vcpu, 0);
vcpu             3375 arch/x86/kvm/svm.c 	leave_guest_mode(&svm->vcpu);
vcpu             3387 arch/x86/kvm/svm.c 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
vcpu             3388 arch/x86/kvm/svm.c 	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
vcpu             3389 arch/x86/kvm/svm.c 	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
vcpu             3391 arch/x86/kvm/svm.c 	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
vcpu             3392 arch/x86/kvm/svm.c 	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
vcpu             3438 arch/x86/kvm/svm.c 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
vcpu             3444 arch/x86/kvm/svm.c 	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
vcpu             3445 arch/x86/kvm/svm.c 	kvm_clear_exception_queue(&svm->vcpu);
vcpu             3446 arch/x86/kvm/svm.c 	kvm_clear_interrupt_queue(&svm->vcpu);
vcpu             3457 arch/x86/kvm/svm.c 	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
vcpu             3458 arch/x86/kvm/svm.c 	svm_set_efer(&svm->vcpu, hsave->save.efer);
vcpu             3459 arch/x86/kvm/svm.c 	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
vcpu             3460 arch/x86/kvm/svm.c 	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
vcpu             3463 arch/x86/kvm/svm.c 		svm->vcpu.arch.cr3 = hsave->save.cr3;
vcpu             3465 arch/x86/kvm/svm.c 		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
vcpu             3467 arch/x86/kvm/svm.c 	kvm_rax_write(&svm->vcpu, hsave->save.rax);
vcpu             3468 arch/x86/kvm/svm.c 	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
vcpu             3469 arch/x86/kvm/svm.c 	kvm_rip_write(&svm->vcpu, hsave->save.rip);
vcpu             3476 arch/x86/kvm/svm.c 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
vcpu             3478 arch/x86/kvm/svm.c 	nested_svm_uninit_mmu_context(&svm->vcpu);
vcpu             3479 arch/x86/kvm/svm.c 	kvm_mmu_reset_context(&svm->vcpu);
vcpu             3480 arch/x86/kvm/svm.c 	kvm_mmu_load(&svm->vcpu);
vcpu             3486 arch/x86/kvm/svm.c 	svm->vcpu.arch.nmi_injected = false;
vcpu             3487 arch/x86/kvm/svm.c 	kvm_clear_exception_queue(&svm->vcpu);
vcpu             3488 arch/x86/kvm/svm.c 	kvm_clear_interrupt_queue(&svm->vcpu);
vcpu             3515 arch/x86/kvm/svm.c 		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
vcpu             3544 arch/x86/kvm/svm.c 	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
vcpu             3545 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
vcpu             3547 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
vcpu             3551 arch/x86/kvm/svm.c 		nested_svm_init_mmu_context(&svm->vcpu);
vcpu             3561 arch/x86/kvm/svm.c 	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
vcpu             3562 arch/x86/kvm/svm.c 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
vcpu             3563 arch/x86/kvm/svm.c 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
vcpu             3564 arch/x86/kvm/svm.c 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
vcpu             3567 arch/x86/kvm/svm.c 		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
vcpu             3569 arch/x86/kvm/svm.c 		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
vcpu             3572 arch/x86/kvm/svm.c 	kvm_mmu_reset_context(&svm->vcpu);
vcpu             3574 arch/x86/kvm/svm.c 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
vcpu             3575 arch/x86/kvm/svm.c 	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
vcpu             3576 arch/x86/kvm/svm.c 	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
vcpu             3577 arch/x86/kvm/svm.c 	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
vcpu             3596 arch/x86/kvm/svm.c 	svm_flush_tlb(&svm->vcpu, true);
vcpu             3599 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
vcpu             3601 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
vcpu             3603 arch/x86/kvm/svm.c 	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
vcpu             3612 arch/x86/kvm/svm.c 	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
vcpu             3613 arch/x86/kvm/svm.c 	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
vcpu             3626 arch/x86/kvm/svm.c 	kvm_vcpu_unmap(&svm->vcpu, map, true);
vcpu             3629 arch/x86/kvm/svm.c 	enter_guest_mode(&svm->vcpu);
vcpu             3655 arch/x86/kvm/svm.c 	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
vcpu             3657 arch/x86/kvm/svm.c 		kvm_inject_gp(&svm->vcpu, 0);
vcpu             3660 arch/x86/kvm/svm.c 		return kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3663 arch/x86/kvm/svm.c 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3673 arch/x86/kvm/svm.c 		kvm_vcpu_unmap(&svm->vcpu, &map, true);
vcpu             3690 arch/x86/kvm/svm.c 	kvm_clear_exception_queue(&svm->vcpu);
vcpu             3691 arch/x86/kvm/svm.c 	kvm_clear_interrupt_queue(&svm->vcpu);
vcpu             3703 arch/x86/kvm/svm.c 	hsave->save.efer   = svm->vcpu.arch.efer;
vcpu             3704 arch/x86/kvm/svm.c 	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
vcpu             3705 arch/x86/kvm/svm.c 	hsave->save.cr4    = svm->vcpu.arch.cr4;
vcpu             3706 arch/x86/kvm/svm.c 	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
vcpu             3707 arch/x86/kvm/svm.c 	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
vcpu             3713 arch/x86/kvm/svm.c 		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
vcpu             3756 arch/x86/kvm/svm.c 	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
vcpu             3759 arch/x86/kvm/svm.c 			kvm_inject_gp(&svm->vcpu, 0);
vcpu             3765 arch/x86/kvm/svm.c 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3768 arch/x86/kvm/svm.c 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
vcpu             3782 arch/x86/kvm/svm.c 	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
vcpu             3785 arch/x86/kvm/svm.c 			kvm_inject_gp(&svm->vcpu, 0);
vcpu             3791 arch/x86/kvm/svm.c 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3794 arch/x86/kvm/svm.c 	kvm_vcpu_unmap(&svm->vcpu, &map, true);
vcpu             3821 arch/x86/kvm/svm.c 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3822 arch/x86/kvm/svm.c 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vcpu             3836 arch/x86/kvm/svm.c 	ret = kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3841 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
vcpu             3852 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = &svm->vcpu;
vcpu             3854 arch/x86/kvm/svm.c 	trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
vcpu             3855 arch/x86/kvm/svm.c 			  kvm_rax_read(&svm->vcpu));
vcpu             3858 arch/x86/kvm/svm.c 	kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
vcpu             3860 arch/x86/kvm/svm.c 	return kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3865 arch/x86/kvm/svm.c 	trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
vcpu             3867 arch/x86/kvm/svm.c 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
vcpu             3873 arch/x86/kvm/svm.c 	return kvm_emulate_wbinvd(&svm->vcpu);
vcpu             3878 arch/x86/kvm/svm.c 	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
vcpu             3879 arch/x86/kvm/svm.c 	u32 index = kvm_rcx_read(&svm->vcpu);
vcpu             3881 arch/x86/kvm/svm.c 	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
vcpu             3882 arch/x86/kvm/svm.c 		return kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3890 arch/x86/kvm/svm.c 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
vcpu             3924 arch/x86/kvm/svm.c 			svm->vcpu.arch.nmi_injected = false;
vcpu             3933 arch/x86/kvm/svm.c 			kvm_clear_exception_queue(&svm->vcpu);
vcpu             3936 arch/x86/kvm/svm.c 			kvm_clear_interrupt_queue(&svm->vcpu);
vcpu             3947 arch/x86/kvm/svm.c 		if (!skip_emulated_instruction(&svm->vcpu))
vcpu             3954 arch/x86/kvm/svm.c 	return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
vcpu             3960 arch/x86/kvm/svm.c 	return kvm_emulate_cpuid(&svm->vcpu);
vcpu             3965 arch/x86/kvm/svm.c 	++svm->vcpu.stat.nmi_window_exits;
vcpu             3967 arch/x86/kvm/svm.c 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
vcpu             3968 arch/x86/kvm/svm.c 	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
vcpu             3969 arch/x86/kvm/svm.c 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vcpu             3976 arch/x86/kvm/svm.c 		return kvm_emulate_instruction(&svm->vcpu, 0);
vcpu             3978 arch/x86/kvm/svm.c 	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
vcpu             3979 arch/x86/kvm/svm.c 	return kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             3984 arch/x86/kvm/svm.c 	return kvm_emulate_instruction(&svm->vcpu, 0);
vcpu             3989 arch/x86/kvm/svm.c 	return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
vcpu             3999 arch/x86/kvm/svm.c 	err = kvm_rdpmc(&svm->vcpu);
vcpu             4000 arch/x86/kvm/svm.c 	return kvm_complete_insn_gp(&svm->vcpu, err);
vcpu             4006 arch/x86/kvm/svm.c 	unsigned long cr0 = svm->vcpu.arch.cr0;
vcpu             4012 arch/x86/kvm/svm.c 	if (!is_guest_mode(&svm->vcpu) ||
vcpu             4050 arch/x86/kvm/svm.c 		val = kvm_register_read(&svm->vcpu, reg);
vcpu             4054 arch/x86/kvm/svm.c 				err = kvm_set_cr0(&svm->vcpu, val);
vcpu             4060 arch/x86/kvm/svm.c 			err = kvm_set_cr3(&svm->vcpu, val);
vcpu             4063 arch/x86/kvm/svm.c 			err = kvm_set_cr4(&svm->vcpu, val);
vcpu             4066 arch/x86/kvm/svm.c 			err = kvm_set_cr8(&svm->vcpu, val);
vcpu             4070 arch/x86/kvm/svm.c 			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
vcpu             4076 arch/x86/kvm/svm.c 			val = kvm_read_cr0(&svm->vcpu);
vcpu             4079 arch/x86/kvm/svm.c 			val = svm->vcpu.arch.cr2;
vcpu             4082 arch/x86/kvm/svm.c 			val = kvm_read_cr3(&svm->vcpu);
vcpu             4085 arch/x86/kvm/svm.c 			val = kvm_read_cr4(&svm->vcpu);
vcpu             4088 arch/x86/kvm/svm.c 			val = kvm_get_cr8(&svm->vcpu);
vcpu             4092 arch/x86/kvm/svm.c 			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
vcpu             4095 arch/x86/kvm/svm.c 		kvm_register_write(&svm->vcpu, reg, val);
vcpu             4097 arch/x86/kvm/svm.c 	return kvm_complete_insn_gp(&svm->vcpu, err);
vcpu             4105 arch/x86/kvm/svm.c 	if (svm->vcpu.guest_debug == 0) {
vcpu             4112 arch/x86/kvm/svm.c 		svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
vcpu             4123 arch/x86/kvm/svm.c 		if (!kvm_require_dr(&svm->vcpu, dr - 16))
vcpu             4125 arch/x86/kvm/svm.c 		val = kvm_register_read(&svm->vcpu, reg);
vcpu             4126 arch/x86/kvm/svm.c 		kvm_set_dr(&svm->vcpu, dr - 16, val);
vcpu             4128 arch/x86/kvm/svm.c 		if (!kvm_require_dr(&svm->vcpu, dr))
vcpu             4130 arch/x86/kvm/svm.c 		kvm_get_dr(&svm->vcpu, dr, &val);
vcpu             4131 arch/x86/kvm/svm.c 		kvm_register_write(&svm->vcpu, reg, val);
vcpu             4134 arch/x86/kvm/svm.c 	return kvm_skip_emulated_instruction(&svm->vcpu);
vcpu             4139 arch/x86/kvm/svm.c 	struct kvm_run *kvm_run = svm->vcpu.run;
vcpu             4142 arch/x86/kvm/svm.c 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
vcpu             4145 arch/x86/kvm/svm.c 	if (lapic_in_kernel(&svm->vcpu))
vcpu             4147 arch/x86/kvm/svm.c 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
vcpu             4169 arch/x86/kvm/svm.c static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             4171 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4233 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
vcpu             4234 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
vcpu             4241 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
vcpu             4250 arch/x86/kvm/svm.c 		family = guest_cpuid_family(vcpu);
vcpu             4251 arch/x86/kvm/svm.c 		model  = guest_cpuid_model(vcpu);
vcpu             4254 arch/x86/kvm/svm.c 			return kvm_get_msr_common(vcpu, msr_info);
vcpu             4267 arch/x86/kvm/svm.c 		return kvm_get_msr_common(vcpu, msr_info);
vcpu             4274 arch/x86/kvm/svm.c 	return kvm_emulate_rdmsr(&svm->vcpu);
vcpu             4277 arch/x86/kvm/svm.c static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
vcpu             4279 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4296 arch/x86/kvm/svm.c 	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
vcpu             4302 arch/x86/kvm/svm.c static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
vcpu             4304 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4310 arch/x86/kvm/svm.c 		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
vcpu             4312 arch/x86/kvm/svm.c 		vcpu->arch.pat = data;
vcpu             4318 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
vcpu             4319 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
vcpu             4346 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
vcpu             4356 arch/x86/kvm/svm.c 		if (is_guest_mode(vcpu))
vcpu             4362 arch/x86/kvm/svm.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
vcpu             4412 arch/x86/kvm/svm.c 			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
vcpu             4430 arch/x86/kvm/svm.c 		return svm_set_vm_cr(vcpu, data);
vcpu             4432 arch/x86/kvm/svm.c 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
vcpu             4453 arch/x86/kvm/svm.c 		if (kvm_vcpu_apicv_active(vcpu))
vcpu             4454 arch/x86/kvm/svm.c 			avic_update_vapic_bar(to_svm(vcpu), data);
vcpu             4457 arch/x86/kvm/svm.c 		return kvm_set_msr_common(vcpu, msr);
vcpu             4464 arch/x86/kvm/svm.c 	return kvm_emulate_wrmsr(&svm->vcpu);
vcpu             4477 arch/x86/kvm/svm.c 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vcpu             4481 arch/x86/kvm/svm.c 	++svm->vcpu.stat.irq_window_exits;
vcpu             4487 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = &svm->vcpu;
vcpu             4488 arch/x86/kvm/svm.c 	bool in_kernel = (svm_get_cpl(vcpu) == 0);
vcpu             4491 arch/x86/kvm/svm.c 		grow_ple_window(vcpu);
vcpu             4493 arch/x86/kvm/svm.c 	kvm_vcpu_on_spin(vcpu, in_kernel);
vcpu             4499 arch/x86/kvm/svm.c 	return kvm_skip_emulated_instruction(&(svm->vcpu));
vcpu             4527 arch/x86/kvm/svm.c 	struct kvm_lapic *apic = svm->vcpu.arch.apic;
vcpu             4529 arch/x86/kvm/svm.c 	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
vcpu             4549 arch/x86/kvm/svm.c 		struct kvm_vcpu *vcpu;
vcpu             4550 arch/x86/kvm/svm.c 		struct kvm *kvm = svm->vcpu.kvm;
vcpu             4551 arch/x86/kvm/svm.c 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
vcpu             4558 arch/x86/kvm/svm.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             4559 arch/x86/kvm/svm.c 			bool m = kvm_apic_match_dest(vcpu, apic,
vcpu             4564 arch/x86/kvm/svm.c 			if (m && !avic_vcpu_is_running(vcpu))
vcpu             4565 arch/x86/kvm/svm.c 				kvm_vcpu_wake_up(vcpu);
vcpu             4571 arch/x86/kvm/svm.c 			  index, svm->vcpu.vcpu_id, icrh, icrl);
vcpu             4583 arch/x86/kvm/svm.c static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
vcpu             4585 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
vcpu             4612 arch/x86/kvm/svm.c static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
vcpu             4617 arch/x86/kvm/svm.c 	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
vcpu             4618 arch/x86/kvm/svm.c 	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
vcpu             4631 arch/x86/kvm/svm.c static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
vcpu             4633 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4635 arch/x86/kvm/svm.c 	u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
vcpu             4641 arch/x86/kvm/svm.c static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
vcpu             4644 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4645 arch/x86/kvm/svm.c 	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
vcpu             4646 arch/x86/kvm/svm.c 	u32 id = kvm_xapic_id(vcpu->arch.apic);
vcpu             4651 arch/x86/kvm/svm.c 	avic_invalidate_logical_id_entry(vcpu);
vcpu             4654 arch/x86/kvm/svm.c 		ret = avic_ldr_write(vcpu, id, ldr);
vcpu             4662 arch/x86/kvm/svm.c static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
vcpu             4665 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4666 arch/x86/kvm/svm.c 	u32 id = kvm_xapic_id(vcpu->arch.apic);
vcpu             4668 arch/x86/kvm/svm.c 	if (vcpu->vcpu_id == id)
vcpu             4671 arch/x86/kvm/svm.c 	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
vcpu             4672 arch/x86/kvm/svm.c 	new = avic_get_physical_id_entry(vcpu, id);
vcpu             4679 arch/x86/kvm/svm.c 	to_svm(vcpu)->avic_physical_id_cache = new;
vcpu             4686 arch/x86/kvm/svm.c 		avic_handle_ldr_update(vcpu);
vcpu             4691 arch/x86/kvm/svm.c static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
vcpu             4693 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4694 arch/x86/kvm/svm.c 	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
vcpu             4699 arch/x86/kvm/svm.c 	avic_invalidate_logical_id_entry(vcpu);
vcpu             4705 arch/x86/kvm/svm.c 	struct kvm_lapic *apic = svm->vcpu.arch.apic;
vcpu             4711 arch/x86/kvm/svm.c 		if (avic_handle_apic_id_update(&svm->vcpu))
vcpu             4715 arch/x86/kvm/svm.c 		if (avic_handle_ldr_update(&svm->vcpu))
vcpu             4719 arch/x86/kvm/svm.c 		avic_handle_dfr_update(&svm->vcpu);
vcpu             4770 arch/x86/kvm/svm.c 	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
vcpu             4778 arch/x86/kvm/svm.c 		ret = kvm_emulate_instruction(&svm->vcpu, 0);
vcpu             4852 arch/x86/kvm/svm.c static void dump_vmcb(struct kvm_vcpu *vcpu)
vcpu             4854 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4968 arch/x86/kvm/svm.c static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
vcpu             4970 arch/x86/kvm/svm.c 	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
vcpu             4976 arch/x86/kvm/svm.c static int handle_exit(struct kvm_vcpu *vcpu)
vcpu             4978 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             4979 arch/x86/kvm/svm.c 	struct kvm_run *kvm_run = vcpu->run;
vcpu             4982 arch/x86/kvm/svm.c 	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
vcpu             4985 arch/x86/kvm/svm.c 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
vcpu             4987 arch/x86/kvm/svm.c 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
vcpu             4996 arch/x86/kvm/svm.c 	if (is_guest_mode(vcpu)) {
vcpu             5021 arch/x86/kvm/svm.c 		dump_vmcb(vcpu);
vcpu             5036 arch/x86/kvm/svm.c 		vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
vcpu             5037 arch/x86/kvm/svm.c 		dump_vmcb(vcpu);
vcpu             5038 arch/x86/kvm/svm.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             5039 arch/x86/kvm/svm.c 		vcpu->run->internal.suberror =
vcpu             5041 arch/x86/kvm/svm.c 		vcpu->run->internal.ndata = 1;
vcpu             5042 arch/x86/kvm/svm.c 		vcpu->run->internal.data[0] = exit_code;
vcpu             5049 arch/x86/kvm/svm.c static void reload_tss(struct kvm_vcpu *vcpu)
vcpu             5061 arch/x86/kvm/svm.c 	int asid = sev_get_asid(svm->vcpu.kvm);
vcpu             5088 arch/x86/kvm/svm.c 	if (sev_guest(svm->vcpu.kvm))
vcpu             5096 arch/x86/kvm/svm.c static void svm_inject_nmi(struct kvm_vcpu *vcpu)
vcpu             5098 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5101 arch/x86/kvm/svm.c 	vcpu->arch.hflags |= HF_NMI_MASK;
vcpu             5103 arch/x86/kvm/svm.c 	++vcpu->stat.nmi_injections;
vcpu             5119 arch/x86/kvm/svm.c static void svm_set_irq(struct kvm_vcpu *vcpu)
vcpu             5121 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5125 arch/x86/kvm/svm.c 	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
vcpu             5126 arch/x86/kvm/svm.c 	++vcpu->stat.irq_injections;
vcpu             5128 arch/x86/kvm/svm.c 	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
vcpu             5132 arch/x86/kvm/svm.c static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
vcpu             5134 arch/x86/kvm/svm.c 	return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
vcpu             5137 arch/x86/kvm/svm.c static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
vcpu             5139 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5141 arch/x86/kvm/svm.c 	if (svm_nested_virtualize_tpr(vcpu) ||
vcpu             5142 arch/x86/kvm/svm.c 	    kvm_vcpu_apicv_active(vcpu))
vcpu             5154 arch/x86/kvm/svm.c static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
vcpu             5159 arch/x86/kvm/svm.c static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
vcpu             5161 arch/x86/kvm/svm.c 	return avic && irqchip_split(vcpu->kvm);
vcpu             5164 arch/x86/kvm/svm.c static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
vcpu             5168 arch/x86/kvm/svm.c static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
vcpu             5173 arch/x86/kvm/svm.c static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
vcpu             5175 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5178 arch/x86/kvm/svm.c 	if (kvm_vcpu_apicv_active(vcpu))
vcpu             5185 arch/x86/kvm/svm.c static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vcpu             5190 arch/x86/kvm/svm.c static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
vcpu             5192 arch/x86/kvm/svm.c 	if (!vcpu->arch.apicv_active)
vcpu             5195 arch/x86/kvm/svm.c 	kvm_lapic_set_irr(vec, vcpu->arch.apic);
vcpu             5198 arch/x86/kvm/svm.c 	if (avic_vcpu_is_running(vcpu)) {
vcpu             5199 arch/x86/kvm/svm.c 		int cpuid = vcpu->cpu;
vcpu             5205 arch/x86/kvm/svm.c 		kvm_vcpu_wake_up(vcpu);
vcpu             5210 arch/x86/kvm/svm.c static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
vcpu             5243 arch/x86/kvm/svm.c 		struct kvm *kvm = svm->vcpu.kvm;
vcpu             5291 arch/x86/kvm/svm.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             5295 arch/x86/kvm/svm.c 	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
vcpu             5304 arch/x86/kvm/svm.c 	*svm = to_svm(vcpu);
vcpu             5353 arch/x86/kvm/svm.c 		    kvm_vcpu_apicv_active(&svm->vcpu)) {
vcpu             5360 arch/x86/kvm/svm.c 						     svm->vcpu.vcpu_id);
vcpu             5394 arch/x86/kvm/svm.c 				struct kvm_vcpu *vcpu;
vcpu             5396 arch/x86/kvm/svm.c 				vcpu = kvm_get_vcpu_by_id(kvm, id);
vcpu             5397 arch/x86/kvm/svm.c 				if (vcpu)
vcpu             5398 arch/x86/kvm/svm.c 					svm_ir_list_del(to_svm(vcpu), &pi);
vcpu             5403 arch/x86/kvm/svm.c 			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
vcpu             5420 arch/x86/kvm/svm.c static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
vcpu             5422 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5426 arch/x86/kvm/svm.c 	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
vcpu             5432 arch/x86/kvm/svm.c static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
vcpu             5434 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5436 arch/x86/kvm/svm.c 	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
vcpu             5439 arch/x86/kvm/svm.c static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
vcpu             5441 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5444 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
vcpu             5447 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
vcpu             5452 arch/x86/kvm/svm.c static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
vcpu             5454 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5462 arch/x86/kvm/svm.c 	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
vcpu             5464 arch/x86/kvm/svm.c 	if (is_guest_mode(vcpu))
vcpu             5465 arch/x86/kvm/svm.c 		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
vcpu             5470 arch/x86/kvm/svm.c static void enable_irq_window(struct kvm_vcpu *vcpu)
vcpu             5472 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5474 arch/x86/kvm/svm.c 	if (kvm_vcpu_apicv_active(vcpu))
vcpu             5491 arch/x86/kvm/svm.c static void enable_nmi_window(struct kvm_vcpu *vcpu)
vcpu             5493 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5495 arch/x86/kvm/svm.c 	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
vcpu             5512 arch/x86/kvm/svm.c 	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
vcpu             5527 arch/x86/kvm/svm.c static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
vcpu             5529 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5537 arch/x86/kvm/svm.c static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
vcpu             5539 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5544 arch/x86/kvm/svm.c static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
vcpu             5548 arch/x86/kvm/svm.c static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
vcpu             5550 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5552 arch/x86/kvm/svm.c 	if (svm_nested_virtualize_tpr(vcpu))
vcpu             5557 arch/x86/kvm/svm.c 		kvm_set_cr8(vcpu, cr8);
vcpu             5561 arch/x86/kvm/svm.c static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
vcpu             5563 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5566 arch/x86/kvm/svm.c 	if (svm_nested_virtualize_tpr(vcpu) ||
vcpu             5567 arch/x86/kvm/svm.c 	    kvm_vcpu_apicv_active(vcpu))
vcpu             5570 arch/x86/kvm/svm.c 	cr8 = kvm_get_cr8(vcpu);
vcpu             5588 arch/x86/kvm/svm.c 	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
vcpu             5589 arch/x86/kvm/svm.c 	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
vcpu             5590 arch/x86/kvm/svm.c 		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
vcpu             5591 arch/x86/kvm/svm.c 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vcpu             5594 arch/x86/kvm/svm.c 	svm->vcpu.arch.nmi_injected = false;
vcpu             5595 arch/x86/kvm/svm.c 	kvm_clear_exception_queue(&svm->vcpu);
vcpu             5596 arch/x86/kvm/svm.c 	kvm_clear_interrupt_queue(&svm->vcpu);
vcpu             5601 arch/x86/kvm/svm.c 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vcpu             5608 arch/x86/kvm/svm.c 		svm->vcpu.arch.nmi_injected = true;
vcpu             5618 arch/x86/kvm/svm.c 			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
vcpu             5619 arch/x86/kvm/svm.c 				kvm_rip_write(&svm->vcpu,
vcpu             5620 arch/x86/kvm/svm.c 					      kvm_rip_read(&svm->vcpu) -
vcpu             5626 arch/x86/kvm/svm.c 			kvm_requeue_exception_e(&svm->vcpu, vector, err);
vcpu             5629 arch/x86/kvm/svm.c 			kvm_requeue_exception(&svm->vcpu, vector);
vcpu             5632 arch/x86/kvm/svm.c 		kvm_queue_interrupt(&svm->vcpu, vector, false);
vcpu             5639 arch/x86/kvm/svm.c static void svm_cancel_injection(struct kvm_vcpu *vcpu)
vcpu             5641 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5650 arch/x86/kvm/svm.c static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu             5652 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5654 arch/x86/kvm/svm.c 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
vcpu             5655 arch/x86/kvm/svm.c 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
vcpu             5656 arch/x86/kvm/svm.c 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
vcpu             5678 arch/x86/kvm/svm.c 		smp_send_reschedule(vcpu->cpu);
vcpu             5683 arch/x86/kvm/svm.c 	sync_lapic_to_cr8(vcpu);
vcpu             5685 arch/x86/kvm/svm.c 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
vcpu             5688 arch/x86/kvm/svm.c 	kvm_load_guest_xcr0(vcpu);
vcpu             5690 arch/x86/kvm/svm.c 	if (lapic_in_kernel(vcpu) &&
vcpu             5691 arch/x86/kvm/svm.c 		vcpu->arch.apic->lapic_timer.timer_advance_ns)
vcpu             5692 arch/x86/kvm/svm.c 		kvm_wait_lapic_expire(vcpu);
vcpu             5769 arch/x86/kvm/svm.c 		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
vcpu             5770 arch/x86/kvm/svm.c 		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
vcpu             5771 arch/x86/kvm/svm.c 		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
vcpu             5772 arch/x86/kvm/svm.c 		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
vcpu             5773 arch/x86/kvm/svm.c 		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
vcpu             5774 arch/x86/kvm/svm.c 		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
vcpu             5776 arch/x86/kvm/svm.c 		  , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
vcpu             5777 arch/x86/kvm/svm.c 		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
vcpu             5778 arch/x86/kvm/svm.c 		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
vcpu             5779 arch/x86/kvm/svm.c 		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
vcpu             5780 arch/x86/kvm/svm.c 		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
vcpu             5781 arch/x86/kvm/svm.c 		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
vcpu             5782 arch/x86/kvm/svm.c 		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
vcpu             5783 arch/x86/kvm/svm.c 		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
vcpu             5821 arch/x86/kvm/svm.c 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
vcpu             5824 arch/x86/kvm/svm.c 	reload_tss(vcpu);
vcpu             5830 arch/x86/kvm/svm.c 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu             5831 arch/x86/kvm/svm.c 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu             5832 arch/x86/kvm/svm.c 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu             5833 arch/x86/kvm/svm.c 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
vcpu             5836 arch/x86/kvm/svm.c 		kvm_before_interrupt(&svm->vcpu);
vcpu             5838 arch/x86/kvm/svm.c 	kvm_put_guest_xcr0(vcpu);
vcpu             5844 arch/x86/kvm/svm.c 		kvm_after_interrupt(&svm->vcpu);
vcpu             5846 arch/x86/kvm/svm.c 	sync_cr8_to_lapic(vcpu);
vcpu             5854 arch/x86/kvm/svm.c 		svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
vcpu             5857 arch/x86/kvm/svm.c 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu             5858 arch/x86/kvm/svm.c 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
vcpu             5873 arch/x86/kvm/svm.c static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
vcpu             5875 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5881 arch/x86/kvm/svm.c static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
vcpu             5883 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5889 arch/x86/kvm/svm.c 	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
vcpu             5905 arch/x86/kvm/svm.c svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
vcpu             5938 arch/x86/kvm/svm.c static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
vcpu             5943 arch/x86/kvm/svm.c static void svm_cpuid_update(struct kvm_vcpu *vcpu)
vcpu             5945 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             5948 arch/x86/kvm/svm.c 	svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
vcpu             5950 arch/x86/kvm/svm.c 	if (!kvm_vcpu_apicv_active(vcpu))
vcpu             5953 arch/x86/kvm/svm.c 	guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
vcpu             6109 arch/x86/kvm/svm.c static int svm_check_intercept(struct kvm_vcpu *vcpu,
vcpu             6113 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             6147 arch/x86/kvm/svm.c 		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
vcpu             6230 arch/x86/kvm/svm.c static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
vcpu             6235 arch/x86/kvm/svm.c static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
vcpu             6238 arch/x86/kvm/svm.c 		shrink_ple_window(vcpu);
vcpu             6241 arch/x86/kvm/svm.c static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
vcpu             6243 arch/x86/kvm/svm.c 	if (avic_handle_apic_id_update(vcpu) != 0)
vcpu             6245 arch/x86/kvm/svm.c 	avic_handle_dfr_update(vcpu);
vcpu             6246 arch/x86/kvm/svm.c 	avic_handle_ldr_update(vcpu);
vcpu             6249 arch/x86/kvm/svm.c static void svm_setup_mce(struct kvm_vcpu *vcpu)
vcpu             6252 arch/x86/kvm/svm.c 	vcpu->arch.mcg_cap &= 0x1ff;
vcpu             6255 arch/x86/kvm/svm.c static int svm_smi_allowed(struct kvm_vcpu *vcpu)
vcpu             6257 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             6263 arch/x86/kvm/svm.c 	if (is_guest_mode(&svm->vcpu) &&
vcpu             6274 arch/x86/kvm/svm.c static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
vcpu             6276 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             6279 arch/x86/kvm/svm.c 	if (is_guest_mode(vcpu)) {
vcpu             6285 arch/x86/kvm/svm.c 		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
vcpu             6286 arch/x86/kvm/svm.c 		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
vcpu             6287 arch/x86/kvm/svm.c 		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
vcpu             6296 arch/x86/kvm/svm.c static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vcpu             6298 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             6308 arch/x86/kvm/svm.c 		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
vcpu             6316 arch/x86/kvm/svm.c static int enable_smi_window(struct kvm_vcpu *vcpu)
vcpu             6318 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu             7164 arch/x86/kvm/svm.c static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
vcpu             7166 arch/x86/kvm/svm.c 	unsigned long cr4 = kvm_read_cr4(vcpu);
vcpu             7169 arch/x86/kvm/svm.c 	bool is_user = svm_get_cpl(vcpu) == 3;
vcpu             7212 arch/x86/kvm/svm.c 		if (!sev_guest(vcpu->kvm))
vcpu             7216 arch/x86/kvm/svm.c 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu             7222 arch/x86/kvm/svm.c static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
vcpu             7224 arch/x86/kvm/svm.c 	struct vcpu_svm *svm = to_svm(vcpu);
vcpu              226 arch/x86/kvm/trace.h 	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
vcpu              227 arch/x86/kvm/trace.h 	TP_ARGS(exit_reason, vcpu, isa),
vcpu              240 arch/x86/kvm/trace.h 		__entry->guest_rip	= kvm_rip_read(vcpu);
vcpu              242 arch/x86/kvm/trace.h 		__entry->vcpu_id        = vcpu->vcpu_id;
vcpu              243 arch/x86/kvm/trace.h 		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
vcpu              477 arch/x86/kvm/trace.h 		__entry->apicid		= apic->vcpu->vcpu_id;
vcpu              494 arch/x86/kvm/trace.h 		__entry->apicid		= apic->vcpu->vcpu_id;
vcpu              734 arch/x86/kvm/trace.h 	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
vcpu              735 arch/x86/kvm/trace.h 	TP_ARGS(vcpu, failed),
vcpu              747 arch/x86/kvm/trace.h 		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
vcpu              748 arch/x86/kvm/trace.h 		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
vcpu              749 arch/x86/kvm/trace.h 			       - vcpu->arch.emulate_ctxt.fetch.data;
vcpu              750 arch/x86/kvm/trace.h 		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
vcpu              752 arch/x86/kvm/trace.h 		       vcpu->arch.emulate_ctxt.fetch.data,
vcpu              754 arch/x86/kvm/trace.h 		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
vcpu              767 arch/x86/kvm/trace.h #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
vcpu              768 arch/x86/kvm/trace.h #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
vcpu             1298 arch/x86/kvm/trace.h 	    TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
vcpu             1299 arch/x86/kvm/trace.h 	    TP_ARGS(vcpu, icrh, icrl, id, index),
vcpu             1302 arch/x86/kvm/trace.h 		__field(u32, vcpu)
vcpu             1310 arch/x86/kvm/trace.h 		__entry->vcpu = vcpu;
vcpu             1318 arch/x86/kvm/trace.h 		  __entry->vcpu, __entry->icrh, __entry->icrl,
vcpu             1323 arch/x86/kvm/trace.h 	    TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
vcpu             1324 arch/x86/kvm/trace.h 	    TP_ARGS(vcpu, offset, ft, rw, vec),
vcpu             1327 arch/x86/kvm/trace.h 		__field(u32, vcpu)
vcpu             1335 arch/x86/kvm/trace.h 		__entry->vcpu = vcpu;
vcpu             1343 arch/x86/kvm/trace.h 		  __entry->vcpu,
vcpu              115 arch/x86/kvm/vmx/capabilities.h static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
vcpu              117 arch/x86/kvm/vmx/capabilities.h 	return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
vcpu              317 arch/x86/kvm/vmx/evmcs.c bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
vcpu              323 arch/x86/kvm/vmx/evmcs.c 	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
vcpu              334 arch/x86/kvm/vmx/evmcs.c uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
vcpu              336 arch/x86/kvm/vmx/evmcs.c        struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              349 arch/x86/kvm/vmx/evmcs.c int nested_enable_evmcs(struct kvm_vcpu *vcpu,
vcpu              352 arch/x86/kvm/vmx/evmcs.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              358 arch/x86/kvm/vmx/evmcs.c 		*vmcs_version = nested_get_evmcs_version(vcpu);
vcpu              200 arch/x86/kvm/vmx/evmcs.h bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
vcpu              201 arch/x86/kvm/vmx/evmcs.h uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
vcpu              202 arch/x86/kvm/vmx/evmcs.h int nested_enable_evmcs(struct kvm_vcpu *vcpu,
vcpu              153 arch/x86/kvm/vmx/nested.c static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
vcpu              155 arch/x86/kvm/vmx/nested.c 	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
vcpu              158 arch/x86/kvm/vmx/nested.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu              161 arch/x86/kvm/vmx/nested.c static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
vcpu              163 arch/x86/kvm/vmx/nested.c 	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
vcpu              167 arch/x86/kvm/vmx/nested.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu              170 arch/x86/kvm/vmx/nested.c static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
vcpu              173 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              180 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu              182 arch/x86/kvm/vmx/nested.c 	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
vcpu              186 arch/x86/kvm/vmx/nested.c 	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
vcpu              191 arch/x86/kvm/vmx/nested.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu              194 arch/x86/kvm/vmx/nested.c static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
vcpu              197 arch/x86/kvm/vmx/nested.c 	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu              218 arch/x86/kvm/vmx/nested.c static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
vcpu              220 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              225 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vcpu              234 arch/x86/kvm/vmx/nested.c static void free_nested(struct kvm_vcpu *vcpu)
vcpu              236 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              241 arch/x86/kvm/vmx/nested.c 	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
vcpu              263 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
vcpu              264 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
vcpu              267 arch/x86/kvm/vmx/nested.c 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
vcpu              269 arch/x86/kvm/vmx/nested.c 	nested_release_evmcs(vcpu);
vcpu              293 arch/x86/kvm/vmx/nested.c static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
vcpu              295 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              305 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
vcpu              316 arch/x86/kvm/vmx/nested.c void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
vcpu              318 arch/x86/kvm/vmx/nested.c 	vcpu_load(vcpu);
vcpu              319 arch/x86/kvm/vmx/nested.c 	vmx_leave_nested(vcpu);
vcpu              320 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
vcpu              321 arch/x86/kvm/vmx/nested.c 	free_nested(vcpu);
vcpu              322 arch/x86/kvm/vmx/nested.c 	vcpu_put(vcpu);
vcpu              325 arch/x86/kvm/vmx/nested.c static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vcpu              328 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu              329 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              331 arch/x86/kvm/vmx/nested.c 	unsigned long exit_qualification = vcpu->arch.exit_qualification;
vcpu              342 arch/x86/kvm/vmx/nested.c 	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
vcpu              346 arch/x86/kvm/vmx/nested.c static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
vcpu              348 arch/x86/kvm/vmx/nested.c 	WARN_ON(mmu_is_nested(vcpu));
vcpu              350 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
vcpu              351 arch/x86/kvm/vmx/nested.c 	kvm_init_shadow_ept_mmu(vcpu,
vcpu              352 arch/x86/kvm/vmx/nested.c 			to_vmx(vcpu)->nested.msrs.ept_caps &
vcpu              354 arch/x86/kvm/vmx/nested.c 			nested_ept_ad_enabled(vcpu),
vcpu              355 arch/x86/kvm/vmx/nested.c 			nested_ept_get_cr3(vcpu));
vcpu              356 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
vcpu              357 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
vcpu              358 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
vcpu              359 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;
vcpu              361 arch/x86/kvm/vmx/nested.c 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
vcpu              364 arch/x86/kvm/vmx/nested.c static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
vcpu              366 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu              367 arch/x86/kvm/vmx/nested.c 	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu              387 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
vcpu              389 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu              390 arch/x86/kvm/vmx/nested.c 	unsigned int nr = vcpu->arch.exception.nr;
vcpu              391 arch/x86/kvm/vmx/nested.c 	bool has_payload = vcpu->arch.exception.has_payload;
vcpu              392 arch/x86/kvm/vmx/nested.c 	unsigned long payload = vcpu->arch.exception.payload;
vcpu              395 arch/x86/kvm/vmx/nested.c 		if (vcpu->arch.exception.nested_apf) {
vcpu              396 arch/x86/kvm/vmx/nested.c 			*exit_qual = vcpu->arch.apf.nested_apf_token;
vcpu              400 arch/x86/kvm/vmx/nested.c 						    vcpu->arch.exception.error_code)) {
vcpu              401 arch/x86/kvm/vmx/nested.c 			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
vcpu              407 arch/x86/kvm/vmx/nested.c 				payload = vcpu->arch.dr6;
vcpu              421 arch/x86/kvm/vmx/nested.c static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
vcpu              424 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu              426 arch/x86/kvm/vmx/nested.c 	WARN_ON(!is_guest_mode(vcpu));
vcpu              429 arch/x86/kvm/vmx/nested.c 		!to_vmx(vcpu)->nested.nested_run_pending) {
vcpu              431 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vcpu              436 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, fault);
vcpu              440 arch/x86/kvm/vmx/nested.c static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
vcpu              442 arch/x86/kvm/vmx/nested.c 	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
vcpu              445 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
vcpu              451 arch/x86/kvm/vmx/nested.c 	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
vcpu              452 arch/x86/kvm/vmx/nested.c 	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
vcpu              458 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
vcpu              464 arch/x86/kvm/vmx/nested.c 	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
vcpu              470 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
vcpu              476 arch/x86/kvm/vmx/nested.c 	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
vcpu              485 arch/x86/kvm/vmx/nested.c static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
vcpu              493 arch/x86/kvm/vmx/nested.c 	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
vcpu              561 arch/x86/kvm/vmx/nested.c static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
vcpu              566 arch/x86/kvm/vmx/nested.c 	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
vcpu              567 arch/x86/kvm/vmx/nested.c 	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
vcpu              574 arch/x86/kvm/vmx/nested.c 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
vcpu              641 arch/x86/kvm/vmx/nested.c 	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
vcpu              647 arch/x86/kvm/vmx/nested.c 	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
vcpu              653 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
vcpu              658 arch/x86/kvm/vmx/nested.c static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
vcpu              668 arch/x86/kvm/vmx/nested.c 	shadow = get_shadow_vmcs12(vcpu);
vcpu              670 arch/x86/kvm/vmx/nested.c 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
vcpu              674 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &map, false);
vcpu              677 arch/x86/kvm/vmx/nested.c static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
vcpu              680 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              686 arch/x86/kvm/vmx/nested.c 	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
vcpu              687 arch/x86/kvm/vmx/nested.c 			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
vcpu              694 arch/x86/kvm/vmx/nested.c static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
vcpu              696 arch/x86/kvm/vmx/nested.c 	return get_vmcs12(vcpu)->vm_exit_controls &
vcpu              700 arch/x86/kvm/vmx/nested.c static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
vcpu              702 arch/x86/kvm/vmx/nested.c 	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
vcpu              705 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
vcpu              709 arch/x86/kvm/vmx/nested.c 	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
vcpu              715 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
vcpu              736 arch/x86/kvm/vmx/nested.c 	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
vcpu              748 arch/x86/kvm/vmx/nested.c 	    CC(!nested_exit_intr_ack_set(vcpu)) ||
vcpu              751 arch/x86/kvm/vmx/nested.c 	    CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
vcpu              761 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
vcpu              768 arch/x86/kvm/vmx/nested.c 	maxphyaddr = cpuid_maxphyaddr(vcpu);
vcpu              776 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
vcpu              779 arch/x86/kvm/vmx/nested.c 	if (CC(nested_vmx_check_msr_switch(vcpu,
vcpu              782 arch/x86/kvm/vmx/nested.c 	    CC(nested_vmx_check_msr_switch(vcpu,
vcpu              790 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
vcpu              793 arch/x86/kvm/vmx/nested.c 	if (CC(nested_vmx_check_msr_switch(vcpu,
vcpu              801 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
vcpu              808 arch/x86/kvm/vmx/nested.c 	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
vcpu              814 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
vcpu              823 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
vcpu              832 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
vcpu              838 arch/x86/kvm/vmx/nested.c 	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
vcpu              839 arch/x86/kvm/vmx/nested.c 	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
vcpu              845 arch/x86/kvm/vmx/nested.c static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
vcpu              849 arch/x86/kvm/vmx/nested.c 	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
vcpu              859 arch/x86/kvm/vmx/nested.c static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
vcpu              865 arch/x86/kvm/vmx/nested.c 	    nested_vmx_msr_check_common(vcpu, e))
vcpu              870 arch/x86/kvm/vmx/nested.c static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
vcpu              874 arch/x86/kvm/vmx/nested.c 	    nested_vmx_msr_check_common(vcpu, e))
vcpu              879 arch/x86/kvm/vmx/nested.c static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
vcpu              881 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu              897 arch/x86/kvm/vmx/nested.c static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
vcpu              901 arch/x86/kvm/vmx/nested.c 	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
vcpu              907 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
vcpu              914 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_load_msr_check(vcpu, &e)) {
vcpu              920 arch/x86/kvm/vmx/nested.c 		if (kvm_set_msr(vcpu, e.index, e.value)) {
vcpu              932 arch/x86/kvm/vmx/nested.c static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
vcpu              937 arch/x86/kvm/vmx/nested.c 	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
vcpu              943 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu,
vcpu              951 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_store_msr_check(vcpu, &e)) {
vcpu              957 arch/x86/kvm/vmx/nested.c 		if (kvm_get_msr(vcpu, e.index, &data)) {
vcpu              963 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_write_guest(vcpu,
vcpu              976 arch/x86/kvm/vmx/nested.c static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
vcpu              980 arch/x86/kvm/vmx/nested.c 	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
vcpu              990 arch/x86/kvm/vmx/nested.c static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
vcpu              993 arch/x86/kvm/vmx/nested.c 	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
vcpu              994 arch/x86/kvm/vmx/nested.c 		if (CC(!nested_cr3_valid(vcpu, cr3))) {
vcpu             1003 arch/x86/kvm/vmx/nested.c 		if (is_pae_paging(vcpu) && !nested_ept) {
vcpu             1004 arch/x86/kvm/vmx/nested.c 			if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
vcpu             1012 arch/x86/kvm/vmx/nested.c 		kvm_mmu_new_cr3(vcpu, cr3, false);
vcpu             1014 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr3 = cr3;
vcpu             1015 arch/x86/kvm/vmx/nested.c 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu             1017 arch/x86/kvm/vmx/nested.c 	kvm_init_mmu(vcpu, false);
vcpu             1033 arch/x86/kvm/vmx/nested.c static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
vcpu             1035 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             1038 arch/x86/kvm/vmx/nested.c 	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
vcpu             1041 arch/x86/kvm/vmx/nested.c static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
vcpu             1043 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1216 arch/x86/kvm/vmx/nested.c int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vcpu             1218 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1369 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
vcpu             1404 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
vcpu             1818 arch/x86/kvm/vmx/nested.c static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
vcpu             1821 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1828 arch/x86/kvm/vmx/nested.c 	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
vcpu             1836 arch/x86/kvm/vmx/nested.c 		nested_release_evmcs(vcpu);
vcpu             1838 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
vcpu             1868 arch/x86/kvm/vmx/nested.c 			nested_release_evmcs(vcpu);
vcpu             1883 arch/x86/kvm/vmx/nested.c 			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             1901 arch/x86/kvm/vmx/nested.c void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
vcpu             1903 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1911 arch/x86/kvm/vmx/nested.c 		nested_vmx_handle_enlightened_vmptrld(vcpu, false);
vcpu             1931 arch/x86/kvm/vmx/nested.c 	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
vcpu             1932 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_kick(&vmx->vcpu);
vcpu             1937 arch/x86/kvm/vmx/nested.c static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
vcpu             1939 arch/x86/kvm/vmx/nested.c 	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
vcpu             1940 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1951 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.virtual_tsc_khz == 0)
vcpu             1956 arch/x86/kvm/vmx/nested.c 	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
vcpu             1967 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
vcpu             1969 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
vcpu             1990 arch/x86/kvm/vmx/nested.c 		vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
vcpu             2306 arch/x86/kvm/vmx/nested.c static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vcpu             2309 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2324 arch/x86/kvm/vmx/nested.c 		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
vcpu             2327 arch/x86/kvm/vmx/nested.c 		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vcpu             2333 arch/x86/kvm/vmx/nested.c 	vmx_set_rflags(vcpu, vmcs12->guest_rflags);
vcpu             2339 arch/x86/kvm/vmx/nested.c 	update_exception_bitmap(vcpu);
vcpu             2340 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vcpu             2341 arch/x86/kvm/vmx/nested.c 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
vcpu             2346 arch/x86/kvm/vmx/nested.c 		vcpu->arch.pat = vmcs12->guest_ia32_pat;
vcpu             2348 arch/x86/kvm/vmx/nested.c 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
vcpu             2351 arch/x86/kvm/vmx/nested.c 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
vcpu             2365 arch/x86/kvm/vmx/nested.c 		if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
vcpu             2368 arch/x86/kvm/vmx/nested.c 				__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
vcpu             2379 arch/x86/kvm/vmx/nested.c 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             2384 arch/x86/kvm/vmx/nested.c 		nested_ept_init_mmu_context(vcpu);
vcpu             2387 arch/x86/kvm/vmx/nested.c 		vmx_flush_tlb(vcpu, true);
vcpu             2397 arch/x86/kvm/vmx/nested.c 	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
vcpu             2400 arch/x86/kvm/vmx/nested.c 	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vcpu             2403 arch/x86/kvm/vmx/nested.c 	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
vcpu             2405 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, vcpu->arch.efer);
vcpu             2418 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
vcpu             2434 arch/x86/kvm/vmx/nested.c 	    is_pae_paging(vcpu)) {
vcpu             2442 arch/x86/kvm/vmx/nested.c 		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
vcpu             2444 arch/x86/kvm/vmx/nested.c 	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
vcpu             2445 arch/x86/kvm/vmx/nested.c 	kvm_rip_write(vcpu, vmcs12->guest_rip);
vcpu             2462 arch/x86/kvm/vmx/nested.c static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
vcpu             2464 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2465 arch/x86/kvm/vmx/nested.c 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
vcpu             2501 arch/x86/kvm/vmx/nested.c static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
vcpu             2504 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2520 arch/x86/kvm/vmx/nested.c 	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
vcpu             2521 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
vcpu             2522 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
vcpu             2523 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
vcpu             2524 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
vcpu             2525 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
vcpu             2527 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
vcpu             2528 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
vcpu             2529 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
vcpu             2530 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
vcpu             2539 arch/x86/kvm/vmx/nested.c 	    CC(!valid_ept_address(vcpu, vmcs12->ept_pointer)))
vcpu             2549 arch/x86/kvm/vmx/nested.c 			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
vcpu             2560 arch/x86/kvm/vmx/nested.c static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
vcpu             2563 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2568 arch/x86/kvm/vmx/nested.c 	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
vcpu             2577 arch/x86/kvm/vmx/nested.c static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
vcpu             2580 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2606 arch/x86/kvm/vmx/nested.c 		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
vcpu             2638 arch/x86/kvm/vmx/nested.c 			    CC(!nested_cpu_has_zero_length_injection(vcpu))))
vcpu             2643 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
vcpu             2649 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
vcpu             2652 arch/x86/kvm/vmx/nested.c 	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
vcpu             2653 arch/x86/kvm/vmx/nested.c 	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
vcpu             2654 arch/x86/kvm/vmx/nested.c 	    nested_check_vm_entry_controls(vcpu, vmcs12))
vcpu             2660 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
vcpu             2665 arch/x86/kvm/vmx/nested.c 	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
vcpu             2666 arch/x86/kvm/vmx/nested.c 	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
vcpu             2667 arch/x86/kvm/vmx/nested.c 	    CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
vcpu             2670 arch/x86/kvm/vmx/nested.c 	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
vcpu             2671 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
vcpu             2679 arch/x86/kvm/vmx/nested.c 	ia32e = !!(vcpu->arch.efer & EFER_LMA);
vcpu             2709 arch/x86/kvm/vmx/nested.c 	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
vcpu             2710 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
vcpu             2711 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
vcpu             2712 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
vcpu             2713 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
vcpu             2714 arch/x86/kvm/vmx/nested.c 	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
vcpu             2725 arch/x86/kvm/vmx/nested.c 		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
vcpu             2734 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
vcpu             2744 arch/x86/kvm/vmx/nested.c 	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
vcpu             2747 arch/x86/kvm/vmx/nested.c 	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
vcpu             2756 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &map, false);
vcpu             2772 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
vcpu             2780 arch/x86/kvm/vmx/nested.c 	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
vcpu             2781 arch/x86/kvm/vmx/nested.c 	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
vcpu             2788 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
vcpu             2802 arch/x86/kvm/vmx/nested.c 	if (to_vmx(vcpu)->nested.nested_run_pending &&
vcpu             2805 arch/x86/kvm/vmx/nested.c 		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
vcpu             2813 arch/x86/kvm/vmx/nested.c 	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
vcpu             2823 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
vcpu             2825 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2839 arch/x86/kvm/vmx/nested.c 	vmx_prepare_switch_to_guest(vcpu);
vcpu             2928 arch/x86/kvm/vmx/nested.c static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
vcpu             2931 arch/x86/kvm/vmx/nested.c static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
vcpu             2933 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             2934 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2950 arch/x86/kvm/vmx/nested.c 		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
vcpu             2958 arch/x86/kvm/vmx/nested.c 			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             2959 arch/x86/kvm/vmx/nested.c 			vcpu->run->internal.suberror =
vcpu             2961 arch/x86/kvm/vmx/nested.c 			vcpu->run->internal.ndata = 0;
vcpu             2969 arch/x86/kvm/vmx/nested.c 		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
vcpu             2995 arch/x86/kvm/vmx/nested.c 		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
vcpu             3003 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
vcpu             3017 arch/x86/kvm/vmx/nested.c static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
vcpu             3019 arch/x86/kvm/vmx/nested.c 	if (!to_vmx(vcpu)->nested.vmxon) {
vcpu             3020 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             3024 arch/x86/kvm/vmx/nested.c 	if (vmx_get_cpl(vcpu)) {
vcpu             3025 arch/x86/kvm/vmx/nested.c 		kvm_inject_gp(vcpu, 0);
vcpu             3032 arch/x86/kvm/vmx/nested.c static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
vcpu             3035 arch/x86/kvm/vmx/nested.c 	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
vcpu             3040 arch/x86/kvm/vmx/nested.c static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu             3053 arch/x86/kvm/vmx/nested.c enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vcpu             3056 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3057 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             3064 arch/x86/kvm/vmx/nested.c 	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
vcpu             3065 arch/x86/kvm/vmx/nested.c 		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
vcpu             3090 arch/x86/kvm/vmx/nested.c 		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
vcpu             3092 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vcpu             3097 arch/x86/kvm/vmx/nested.c 		if (unlikely(!nested_get_vmcs12_pages(vcpu)))
vcpu             3100 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_check_vmentry_hw(vcpu)) {
vcpu             3101 arch/x86/kvm/vmx/nested.c 			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vcpu             3105 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
vcpu             3109 arch/x86/kvm/vmx/nested.c 	enter_guest_mode(vcpu);
vcpu             3111 arch/x86/kvm/vmx/nested.c 		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
vcpu             3113 arch/x86/kvm/vmx/nested.c 	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
vcpu             3118 arch/x86/kvm/vmx/nested.c 		exit_qual = nested_vmx_load_msr(vcpu,
vcpu             3131 arch/x86/kvm/vmx/nested.c 		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
vcpu             3149 arch/x86/kvm/vmx/nested.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             3158 arch/x86/kvm/vmx/nested.c 		vmx_start_preemption_timer(vcpu);
vcpu             3175 arch/x86/kvm/vmx/nested.c 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
vcpu             3176 arch/x86/kvm/vmx/nested.c 	leave_guest_mode(vcpu);
vcpu             3179 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vcpu             3184 arch/x86/kvm/vmx/nested.c 	load_vmcs12_host_state(vcpu, vmcs12);
vcpu             3196 arch/x86/kvm/vmx/nested.c static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vcpu             3200 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3201 arch/x86/kvm/vmx/nested.c 	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
vcpu             3203 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             3206 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_handle_enlightened_vmptrld(vcpu, launch))
vcpu             3210 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             3212 arch/x86/kvm/vmx/nested.c 	vmcs12 = get_vmcs12(vcpu);
vcpu             3221 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             3242 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             3246 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             3250 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_check_controls(vcpu, vmcs12))
vcpu             3251 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
vcpu             3253 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_check_host_state(vcpu, vmcs12))
vcpu             3254 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
vcpu             3261 arch/x86/kvm/vmx/nested.c 	status = nested_vmx_enter_non_root_mode(vcpu, true);
vcpu             3266 arch/x86/kvm/vmx/nested.c 	vmx->vcpu.arch.l1tf_flush_l1d = true;
vcpu             3278 arch/x86/kvm/vmx/nested.c 	nested_cache_shadow_vmcs12(vcpu, vmcs12);
vcpu             3291 arch/x86/kvm/vmx/nested.c 		return kvm_vcpu_halt(vcpu);
vcpu             3302 arch/x86/kvm/vmx/nested.c 	return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
vcpu             3323 arch/x86/kvm/vmx/nested.c vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vcpu             3326 arch/x86/kvm/vmx/nested.c 	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
vcpu             3329 arch/x86/kvm/vmx/nested.c 			vcpu->arch.cr0_guest_owned_bits));
vcpu             3333 arch/x86/kvm/vmx/nested.c vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vcpu             3336 arch/x86/kvm/vmx/nested.c 	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
vcpu             3339 arch/x86/kvm/vmx/nested.c 			vcpu->arch.cr4_guest_owned_bits));
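The vmcs12_guest_cr0()/vmcs12_guest_cr4() lines above rebuild L1's view of a control register from three sources: bits L2 currently owns (read back live from the hardware VMCS), bits L1 chose to intercept (taken from L1's shadow in vmcs12), and everything else (taken from L0's read shadow). A minimal sketch of that three-way merge, using illustrative parameter names rather than the kernel's:

	/* Sketch only: mirrors the merge done by vmcs12_guest_cr0()/vmcs12_guest_cr4(). */
	static unsigned long merge_guest_cr(unsigned long hw_cr,      /* vmcs_readl(GUEST_CRx)           */
					    unsigned long l0_shadow,   /* vmcs_readl(CRx_READ_SHADOW)     */
					    unsigned long l1_shadow,   /* vmcs12->guest_crX               */
					    unsigned long l2_owned,    /* vcpu->arch.crX_guest_owned_bits */
					    unsigned long l1_mask)     /* vmcs12->crX_guest_host_mask     */
	{
		return (hw_cr     &   l2_owned) |               /* 1: bits L2 owns, live value    */
		       (l1_shadow &   l1_mask)  |               /* 2: bits L1 intercepts          */
		       (l0_shadow & ~(l1_mask | l2_owned));     /* 3: the rest, from L0's shadow  */
	}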
vcpu             3342 arch/x86/kvm/vmx/nested.c static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
vcpu             3348 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.exception.injected) {
vcpu             3349 arch/x86/kvm/vmx/nested.c 		nr = vcpu->arch.exception.nr;
vcpu             3354 arch/x86/kvm/vmx/nested.c 				vcpu->arch.event_exit_inst_len;
vcpu             3359 arch/x86/kvm/vmx/nested.c 		if (vcpu->arch.exception.has_error_code) {
vcpu             3362 arch/x86/kvm/vmx/nested.c 				vcpu->arch.exception.error_code;
vcpu             3366 arch/x86/kvm/vmx/nested.c 	} else if (vcpu->arch.nmi_injected) {
vcpu             3369 arch/x86/kvm/vmx/nested.c 	} else if (vcpu->arch.interrupt.injected) {
vcpu             3370 arch/x86/kvm/vmx/nested.c 		nr = vcpu->arch.interrupt.nr;
vcpu             3373 arch/x86/kvm/vmx/nested.c 		if (vcpu->arch.interrupt.soft) {
vcpu             3376 arch/x86/kvm/vmx/nested.c 				vcpu->arch.event_exit_inst_len;
vcpu             3385 arch/x86/kvm/vmx/nested.c static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
vcpu             3387 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             3397 arch/x86/kvm/vmx/nested.c 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
vcpu             3402 arch/x86/kvm/vmx/nested.c 		kvm_vcpu_mark_page_dirty(vcpu, gfn);
vcpu             3406 arch/x86/kvm/vmx/nested.c static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
vcpu             3408 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3436 arch/x86/kvm/vmx/nested.c 	nested_mark_vmcs12_pages_dirty(vcpu);
vcpu             3439 arch/x86/kvm/vmx/nested.c static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
vcpu             3442 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             3443 arch/x86/kvm/vmx/nested.c 	unsigned int nr = vcpu->arch.exception.nr;
vcpu             3446 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.exception.has_error_code) {
vcpu             3447 arch/x86/kvm/vmx/nested.c 		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
vcpu             3457 arch/x86/kvm/vmx/nested.c 	    vmx_get_nmi_mask(vcpu))
vcpu             3460 arch/x86/kvm/vmx/nested.c 	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
vcpu             3463 arch/x86/kvm/vmx/nested.c static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
vcpu             3465 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3468 arch/x86/kvm/vmx/nested.c 	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
vcpu             3469 arch/x86/kvm/vmx/nested.c 	struct kvm_lapic *apic = vcpu->arch.apic;
vcpu             3471 arch/x86/kvm/vmx/nested.c 	if (lapic_in_kernel(vcpu) &&
vcpu             3475 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
vcpu             3479 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.exception.pending &&
vcpu             3480 arch/x86/kvm/vmx/nested.c 		nested_vmx_check_exception(vcpu, &exit_qual)) {
vcpu             3483 arch/x86/kvm/vmx/nested.c 		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
vcpu             3487 arch/x86/kvm/vmx/nested.c 	if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vcpu             3491 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
vcpu             3495 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
vcpu             3498 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vcpu             3505 arch/x86/kvm/vmx/nested.c 		vcpu->arch.nmi_pending = 0;
vcpu             3506 arch/x86/kvm/vmx/nested.c 		vmx_set_nmi_mask(vcpu, true);
vcpu             3510 arch/x86/kvm/vmx/nested.c 	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
vcpu             3513 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
vcpu             3517 arch/x86/kvm/vmx/nested.c 	vmx_complete_nested_posted_interrupt(vcpu);
vcpu             3521 arch/x86/kvm/vmx/nested.c static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
vcpu             3524 arch/x86/kvm/vmx/nested.c 		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
vcpu             3530 arch/x86/kvm/vmx/nested.c 	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
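The line above is the first half of the preemption-timer readback: the remaining hrtimer time in nanoseconds is scaled to guest TSC cycles (ns x virtual_tsc_khz / 10^6) and then shifted right by the emulated preemption-timer rate. As a rough worked example, assuming the mainline rate constant of 5: with 500,000 ns remaining and virtual_tsc_khz = 2,000,000 (a 2 GHz guest TSC), 500,000 x 2,000,000 / 1,000,000 = 1,000,000 TSC cycles, and 1,000,000 >> 5 gives about 31,250 preemption-timer ticks to report back in vmcs12.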
vcpu             3582 arch/x86/kvm/vmx/nested.c static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
vcpu             3585 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3629 arch/x86/kvm/vmx/nested.c static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
vcpu             3632 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3643 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vcpu             3645 arch/x86/kvm/vmx/nested.c 	sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vcpu             3648 arch/x86/kvm/vmx/nested.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vcpu             3658 arch/x86/kvm/vmx/nested.c static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vcpu             3660 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3663 arch/x86/kvm/vmx/nested.c 		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vcpu             3667 arch/x86/kvm/vmx/nested.c 	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vcpu             3668 arch/x86/kvm/vmx/nested.c 	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
vcpu             3670 arch/x86/kvm/vmx/nested.c 	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
vcpu             3671 arch/x86/kvm/vmx/nested.c 	vmcs12->guest_rip = kvm_rip_read(vcpu);
vcpu             3684 arch/x86/kvm/vmx/nested.c 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vcpu             3692 arch/x86/kvm/vmx/nested.c 				vmx_get_preemption_timer_value(vcpu);
vcpu             3704 arch/x86/kvm/vmx/nested.c 		if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
vcpu             3719 arch/x86/kvm/vmx/nested.c 		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
vcpu             3722 arch/x86/kvm/vmx/nested.c 		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
vcpu             3725 arch/x86/kvm/vmx/nested.c 		vmcs12->guest_ia32_efer = vcpu->arch.efer;
vcpu             3739 arch/x86/kvm/vmx/nested.c static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vcpu             3763 arch/x86/kvm/vmx/nested.c 		vmcs12_save_pending_event(vcpu, vmcs12);
vcpu             3771 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_store_msr(vcpu,
vcpu             3774 arch/x86/kvm/vmx/nested.c 			nested_vmx_abort(vcpu,
vcpu             3782 arch/x86/kvm/vmx/nested.c 	vcpu->arch.nmi_injected = false;
vcpu             3783 arch/x86/kvm/vmx/nested.c 	kvm_clear_exception_queue(vcpu);
vcpu             3784 arch/x86/kvm/vmx/nested.c 	kvm_clear_interrupt_queue(vcpu);
vcpu             3796 arch/x86/kvm/vmx/nested.c static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu             3803 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer = vmcs12->host_ia32_efer;
vcpu             3805 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
vcpu             3807 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
vcpu             3808 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, vcpu->arch.efer);
vcpu             3810 arch/x86/kvm/vmx/nested.c 	kvm_rsp_write(vcpu, vmcs12->host_rsp);
vcpu             3811 arch/x86/kvm/vmx/nested.c 	kvm_rip_write(vcpu, vmcs12->host_rip);
vcpu             3812 arch/x86/kvm/vmx/nested.c 	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
vcpu             3813 arch/x86/kvm/vmx/nested.c 	vmx_set_interrupt_shadow(vcpu, 0);
vcpu             3822 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
vcpu             3823 arch/x86/kvm/vmx/nested.c 	vmx_set_cr0(vcpu, vmcs12->host_cr0);
vcpu             3826 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vcpu             3827 arch/x86/kvm/vmx/nested.c 	vmx_set_cr4(vcpu, vmcs12->host_cr4);
vcpu             3829 arch/x86/kvm/vmx/nested.c 	nested_ept_uninit_mmu_context(vcpu);
vcpu             3835 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
vcpu             3836 arch/x86/kvm/vmx/nested.c 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
vcpu             3839 arch/x86/kvm/vmx/nested.c 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
vcpu             3856 arch/x86/kvm/vmx/nested.c 	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
vcpu             3857 arch/x86/kvm/vmx/nested.c 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             3874 arch/x86/kvm/vmx/nested.c 		vcpu->arch.pat = vmcs12->host_ia32_pat;
vcpu             3895 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
vcpu             3906 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
vcpu             3908 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
vcpu             3910 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
vcpu             3913 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
vcpu             3916 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
vcpu             3924 arch/x86/kvm/vmx/nested.c 	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
vcpu             3926 arch/x86/kvm/vmx/nested.c 	kvm_set_dr(vcpu, 7, 0x400);
vcpu             3930 arch/x86/kvm/vmx/nested.c 		vmx_update_msr_bitmap(vcpu);
vcpu             3932 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vcpu             3934 arch/x86/kvm/vmx/nested.c 		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
vcpu             3960 arch/x86/kvm/vmx/nested.c static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
vcpu             3962 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             3963 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3968 arch/x86/kvm/vmx/nested.c 	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
vcpu             3977 arch/x86/kvm/vmx/nested.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
vcpu             3978 arch/x86/kvm/vmx/nested.c 			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
vcpu             3980 arch/x86/kvm/vmx/nested.c 			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
vcpu             3987 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
vcpu             3989 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
vcpu             3990 arch/x86/kvm/vmx/nested.c 	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
vcpu             3992 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vcpu             3993 arch/x86/kvm/vmx/nested.c 	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
vcpu             3995 arch/x86/kvm/vmx/nested.c 	nested_ept_uninit_mmu_context(vcpu);
vcpu             3996 arch/x86/kvm/vmx/nested.c 	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
vcpu             3997 arch/x86/kvm/vmx/nested.c 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu             4006 arch/x86/kvm/vmx/nested.c 		ept_save_pdptrs(vcpu);
vcpu             4008 arch/x86/kvm/vmx/nested.c 	kvm_mmu_reset_context(vcpu);
vcpu             4011 arch/x86/kvm/vmx/nested.c 		vmx_update_msr_bitmap(vcpu);
vcpu             4026 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
vcpu             4035 arch/x86/kvm/vmx/nested.c 			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
vcpu             4046 arch/x86/kvm/vmx/nested.c 			if (nested_vmx_load_msr_check(vcpu, &h)) {
vcpu             4053 arch/x86/kvm/vmx/nested.c 			if (kvm_set_msr(vcpu, h.index, h.value)) {
vcpu             4065 arch/x86/kvm/vmx/nested.c 	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
vcpu             4073 arch/x86/kvm/vmx/nested.c void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vcpu             4076 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4077 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             4082 arch/x86/kvm/vmx/nested.c 	leave_guest_mode(vcpu);
vcpu             4085 arch/x86/kvm/vmx/nested.c 		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
vcpu             4088 arch/x86/kvm/vmx/nested.c 		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
vcpu             4091 arch/x86/kvm/vmx/nested.c 		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
vcpu             4094 arch/x86/kvm/vmx/nested.c 			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
vcpu             4106 arch/x86/kvm/vmx/nested.c 		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
vcpu             4119 arch/x86/kvm/vmx/nested.c 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
vcpu             4124 arch/x86/kvm/vmx/nested.c 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
vcpu             4131 arch/x86/kvm/vmx/nested.c 		vmx_set_virtual_apic_mode(vcpu);
vcpu             4135 arch/x86/kvm/vmx/nested.c 		vmx_flush_tlb(vcpu, true);
vcpu             4143 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
vcpu             4144 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
vcpu             4151 arch/x86/kvm/vmx/nested.c 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
vcpu             4157 arch/x86/kvm/vmx/nested.c 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu             4161 arch/x86/kvm/vmx/nested.c 		    nested_exit_intr_ack_set(vcpu)) {
vcpu             4162 arch/x86/kvm/vmx/nested.c 			int irq = kvm_cpu_get_interrupt(vcpu);
vcpu             4176 arch/x86/kvm/vmx/nested.c 		load_vmcs12_host_state(vcpu, vmcs12);
vcpu             4188 arch/x86/kvm/vmx/nested.c 	(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
vcpu             4196 arch/x86/kvm/vmx/nested.c 	nested_vmx_restore_host_state(vcpu);
vcpu             4207 arch/x86/kvm/vmx/nested.c int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
vcpu             4232 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             4244 arch/x86/kvm/vmx/nested.c 		off += kvm_register_read(vcpu, base_reg);
vcpu             4246 arch/x86/kvm/vmx/nested.c 		off += kvm_register_read(vcpu, index_reg)<<scaling;
vcpu             4247 arch/x86/kvm/vmx/nested.c 	vmx_get_segment(vcpu, &s, seg_reg);
vcpu             4262 arch/x86/kvm/vmx/nested.c 	if (is_long_mode(vcpu)) {
vcpu             4277 arch/x86/kvm/vmx/nested.c 		exn = is_noncanonical_address(*ret, vcpu);
vcpu             4303 arch/x86/kvm/vmx/nested.c 			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
vcpu             4321 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception_e(vcpu,
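The get_vmx_mem_address() lines above assemble the linear address of a VMX instruction's memory operand: the displacement arrives in the exit qualification, base/index/scaling and the segment register come out of VMX_INSTRUCTION_INFO, the segment base is added, and in 64-bit mode the only remaining check is that the result is canonical, otherwise #GP(0) (or #SS(0) for SS-relative operands) is queued. A self-contained sketch of that computation, with hypothetical names, assuming 48-bit linear addresses and ignoring the 16/32-bit address-size truncation and protected-mode limit checks:

	/* Canonical iff bits 63:48 are a sign extension of bit 47 (48-bit VA assumed). */
	static int sketch_noncanonical(unsigned long long la)
	{
		return la != (unsigned long long)((long long)(la << 16) >> 16);
	}

	/* Returns non-zero when the caller should inject #GP(0)/#SS(0). */
	static int sketch_operand_address(unsigned long long displacement, /* exit qualification    */
					  unsigned long long base,         /* base register value   */
					  unsigned long long index,        /* index register value  */
					  unsigned int scaling,            /* 0..3                  */
					  unsigned long long seg_base,     /* from the segment used */
					  int long_mode,
					  unsigned long long *ret)
	{
		unsigned long long off = displacement + base + (index << scaling);

		*ret = seg_base + off;
		return long_mode && sketch_noncanonical(*ret);
	}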
vcpu             4331 arch/x86/kvm/vmx/nested.c static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
vcpu             4336 arch/x86/kvm/vmx/nested.c 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vcpu             4341 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
vcpu             4342 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
vcpu             4354 arch/x86/kvm/vmx/nested.c static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
vcpu             4356 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4375 arch/x86/kvm/vmx/nested.c static int enter_vmx_operation(struct kvm_vcpu *vcpu)
vcpu             4377 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4392 arch/x86/kvm/vmx/nested.c 	if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
vcpu             4432 arch/x86/kvm/vmx/nested.c static int handle_vmon(struct kvm_vcpu *vcpu)
vcpu             4437 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4450 arch/x86/kvm/vmx/nested.c 	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
vcpu             4451 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             4456 arch/x86/kvm/vmx/nested.c 	if (vmx_get_cpl(vcpu)) {
vcpu             4457 arch/x86/kvm/vmx/nested.c 		kvm_inject_gp(vcpu, 0);
vcpu             4462 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4467 arch/x86/kvm/vmx/nested.c 		kvm_inject_gp(vcpu, 0);
vcpu             4471 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_get_vmptr(vcpu, &vmptr))
vcpu             4482 arch/x86/kvm/vmx/nested.c 	if (!page_address_valid(vcpu, vmptr))
vcpu             4483 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             4485 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
vcpu             4487 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             4490 arch/x86/kvm/vmx/nested.c 	ret = enter_vmx_operation(vcpu);
vcpu             4494 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4497 arch/x86/kvm/vmx/nested.c static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
vcpu             4499 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4504 arch/x86/kvm/vmx/nested.c 	copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
vcpu             4515 arch/x86/kvm/vmx/nested.c 	kvm_vcpu_write_guest_page(vcpu,
vcpu             4519 arch/x86/kvm/vmx/nested.c 	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
vcpu             4525 arch/x86/kvm/vmx/nested.c static int handle_vmoff(struct kvm_vcpu *vcpu)
vcpu             4527 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4530 arch/x86/kvm/vmx/nested.c 	free_nested(vcpu);
vcpu             4533 arch/x86/kvm/vmx/nested.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             4535 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4539 arch/x86/kvm/vmx/nested.c static int handle_vmclear(struct kvm_vcpu *vcpu)
vcpu             4541 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4546 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4549 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_get_vmptr(vcpu, &vmptr))
vcpu             4552 arch/x86/kvm/vmx/nested.c 	if (!page_address_valid(vcpu, vmptr))
vcpu             4553 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4557 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4571 arch/x86/kvm/vmx/nested.c 		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
vcpu             4573 arch/x86/kvm/vmx/nested.c 			nested_release_vmcs12(vcpu);
vcpu             4575 arch/x86/kvm/vmx/nested.c 		kvm_vcpu_write_guest(vcpu,
vcpu             4581 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4584 arch/x86/kvm/vmx/nested.c static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
vcpu             4587 arch/x86/kvm/vmx/nested.c static int handle_vmlaunch(struct kvm_vcpu *vcpu)
vcpu             4589 arch/x86/kvm/vmx/nested.c 	return nested_vmx_run(vcpu, true);
vcpu             4593 arch/x86/kvm/vmx/nested.c static int handle_vmresume(struct kvm_vcpu *vcpu)
vcpu             4596 arch/x86/kvm/vmx/nested.c 	return nested_vmx_run(vcpu, false);
vcpu             4599 arch/x86/kvm/vmx/nested.c static int handle_vmread(struct kvm_vcpu *vcpu)
vcpu             4603 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4608 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
vcpu             4609 arch/x86/kvm/vmx/nested.c 						    : get_vmcs12(vcpu);
vcpu             4613 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4621 arch/x86/kvm/vmx/nested.c 	    (is_guest_mode(vcpu) &&
vcpu             4622 arch/x86/kvm/vmx/nested.c 	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
vcpu             4623 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             4626 arch/x86/kvm/vmx/nested.c 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
vcpu             4630 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4633 arch/x86/kvm/vmx/nested.c 	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
vcpu             4634 arch/x86/kvm/vmx/nested.c 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vcpu             4645 arch/x86/kvm/vmx/nested.c 		kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
vcpu             4648 arch/x86/kvm/vmx/nested.c 		len = is_64_bit_mode(vcpu) ? 8 : 4;
vcpu             4649 arch/x86/kvm/vmx/nested.c 		if (get_vmx_mem_address(vcpu, exit_qualification,
vcpu             4653 arch/x86/kvm/vmx/nested.c 		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e)) {
vcpu             4654 arch/x86/kvm/vmx/nested.c 			kvm_inject_page_fault(vcpu, &e);
vcpu             4659 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4686 arch/x86/kvm/vmx/nested.c static int handle_vmwrite(struct kvm_vcpu *vcpu)
vcpu             4691 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4703 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
vcpu             4704 arch/x86/kvm/vmx/nested.c 						    : get_vmcs12(vcpu);
vcpu             4707 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4715 arch/x86/kvm/vmx/nested.c 	    (is_guest_mode(vcpu) &&
vcpu             4716 arch/x86/kvm/vmx/nested.c 	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
vcpu             4717 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failInvalid(vcpu);
vcpu             4720 arch/x86/kvm/vmx/nested.c 		field_value = kvm_register_readl(vcpu,
vcpu             4723 arch/x86/kvm/vmx/nested.c 		len = is_64_bit_mode(vcpu) ? 8 : 4;
vcpu             4724 arch/x86/kvm/vmx/nested.c 		if (get_vmx_mem_address(vcpu, exit_qualification,
vcpu             4727 arch/x86/kvm/vmx/nested.c 		if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
vcpu             4728 arch/x86/kvm/vmx/nested.c 			kvm_inject_page_fault(vcpu, &e);
vcpu             4734 arch/x86/kvm/vmx/nested.c 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
vcpu             4738 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4746 arch/x86/kvm/vmx/nested.c 	    !nested_cpu_has_vmwrite_any_field(vcpu))
vcpu             4747 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4754 arch/x86/kvm/vmx/nested.c 	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
vcpu             4755 arch/x86/kvm/vmx/nested.c 		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vcpu             4776 arch/x86/kvm/vmx/nested.c 	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
vcpu             4794 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4810 arch/x86/kvm/vmx/nested.c static int handle_vmptrld(struct kvm_vcpu *vcpu)
vcpu             4812 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4815 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4818 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_get_vmptr(vcpu, &vmptr))
vcpu             4821 arch/x86/kvm/vmx/nested.c 	if (!page_address_valid(vcpu, vmptr))
vcpu             4822 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4826 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4837 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
vcpu             4844 arch/x86/kvm/vmx/nested.c 			return nested_vmx_failValid(vcpu,
vcpu             4852 arch/x86/kvm/vmx/nested.c 		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
vcpu             4853 arch/x86/kvm/vmx/nested.c 			kvm_vcpu_unmap(vcpu, &map, false);
vcpu             4854 arch/x86/kvm/vmx/nested.c 			return nested_vmx_failValid(vcpu,
vcpu             4858 arch/x86/kvm/vmx/nested.c 		nested_release_vmcs12(vcpu);
vcpu             4865 arch/x86/kvm/vmx/nested.c 		kvm_vcpu_unmap(vcpu, &map, false);
vcpu             4870 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4874 arch/x86/kvm/vmx/nested.c static int handle_vmptrst(struct kvm_vcpu *vcpu)
vcpu             4878 arch/x86/kvm/vmx/nested.c 	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
vcpu             4882 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4885 arch/x86/kvm/vmx/nested.c 	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
vcpu             4888 arch/x86/kvm/vmx/nested.c 	if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
vcpu             4892 arch/x86/kvm/vmx/nested.c 	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
vcpu             4894 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
vcpu             4897 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4901 arch/x86/kvm/vmx/nested.c static int handle_invept(struct kvm_vcpu *vcpu)
vcpu             4903 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4915 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             4919 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4923 arch/x86/kvm/vmx/nested.c 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
vcpu             4928 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4934 arch/x86/kvm/vmx/nested.c 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vcpu             4937 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
vcpu             4938 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
vcpu             4955 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             4958 arch/x86/kvm/vmx/nested.c static int handle_invvpid(struct kvm_vcpu *vcpu)
vcpu             4960 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4974 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             4978 arch/x86/kvm/vmx/nested.c 	if (!nested_vmx_check_permission(vcpu))
vcpu             4982 arch/x86/kvm/vmx/nested.c 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
vcpu             4988 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             4994 arch/x86/kvm/vmx/nested.c 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vcpu             4997 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
vcpu             4998 arch/x86/kvm/vmx/nested.c 		kvm_inject_page_fault(vcpu, &e);
vcpu             5002 arch/x86/kvm/vmx/nested.c 		return nested_vmx_failValid(vcpu,
vcpu             5005 arch/x86/kvm/vmx/nested.c 	vpid02 = nested_get_vpid02(vcpu);
vcpu             5009 arch/x86/kvm/vmx/nested.c 		    is_noncanonical_address(operand.gla, vcpu))
vcpu             5010 arch/x86/kvm/vmx/nested.c 			return nested_vmx_failValid(vcpu,
vcpu             5016 arch/x86/kvm/vmx/nested.c 			__vmx_flush_tlb(vcpu, vpid02, false);
vcpu             5021 arch/x86/kvm/vmx/nested.c 			return nested_vmx_failValid(vcpu,
vcpu             5023 arch/x86/kvm/vmx/nested.c 		__vmx_flush_tlb(vcpu, vpid02, false);
vcpu             5026 arch/x86/kvm/vmx/nested.c 		__vmx_flush_tlb(vcpu, vpid02, false);
vcpu             5030 arch/x86/kvm/vmx/nested.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5033 arch/x86/kvm/vmx/nested.c 	return nested_vmx_succeed(vcpu);
vcpu             5036 arch/x86/kvm/vmx/nested.c static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
vcpu             5039 arch/x86/kvm/vmx/nested.c 	u32 index = kvm_rcx_read(vcpu);
vcpu             5042 arch/x86/kvm/vmx/nested.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
vcpu             5052 arch/x86/kvm/vmx/nested.c 	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
vcpu             5063 arch/x86/kvm/vmx/nested.c 		if (!valid_ept_address(vcpu, address))
vcpu             5066 arch/x86/kvm/vmx/nested.c 		kvm_mmu_unload(vcpu);
vcpu             5075 arch/x86/kvm/vmx/nested.c 		kvm_mmu_reload(vcpu);
vcpu             5081 arch/x86/kvm/vmx/nested.c static int handle_vmfunc(struct kvm_vcpu *vcpu)
vcpu             5083 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5085 arch/x86/kvm/vmx/nested.c 	u32 function = kvm_rax_read(vcpu);
vcpu             5092 arch/x86/kvm/vmx/nested.c 	if (!is_guest_mode(vcpu)) {
vcpu             5093 arch/x86/kvm/vmx/nested.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             5097 arch/x86/kvm/vmx/nested.c 	vmcs12 = get_vmcs12(vcpu);
vcpu             5103 arch/x86/kvm/vmx/nested.c 		if (nested_vmx_eptp_switching(vcpu, vmcs12))
vcpu             5109 arch/x86/kvm/vmx/nested.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             5112 arch/x86/kvm/vmx/nested.c 	nested_vmx_vmexit(vcpu, vmx->exit_reason,
vcpu             5122 arch/x86/kvm/vmx/nested.c bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
vcpu             5125 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             5142 arch/x86/kvm/vmx/nested.c 			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
vcpu             5155 arch/x86/kvm/vmx/nested.c static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
vcpu             5170 arch/x86/kvm/vmx/nested.c 	return nested_vmx_check_io_bitmaps(vcpu, port, size);
vcpu             5179 arch/x86/kvm/vmx/nested.c static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
vcpu             5182 arch/x86/kvm/vmx/nested.c 	u32 msr_index = kvm_rcx_read(vcpu);
vcpu             5204 arch/x86/kvm/vmx/nested.c 		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
vcpu             5216 arch/x86/kvm/vmx/nested.c static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
vcpu             5227 arch/x86/kvm/vmx/nested.c 		val = kvm_register_readl(vcpu, reg);
vcpu             5295 arch/x86/kvm/vmx/nested.c static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
vcpu             5307 arch/x86/kvm/vmx/nested.c 	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
vcpu             5313 arch/x86/kvm/vmx/nested.c 	if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
vcpu             5324 arch/x86/kvm/vmx/nested.c bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
vcpu             5327 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5328 arch/x86/kvm/vmx/nested.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             5351 arch/x86/kvm/vmx/nested.c 	nested_mark_vmcs12_pages_dirty(vcpu);
vcpu             5353 arch/x86/kvm/vmx/nested.c 	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
vcpu             5365 arch/x86/kvm/vmx/nested.c 			return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
vcpu             5367 arch/x86/kvm/vmx/nested.c 			 vcpu->guest_debug &
vcpu             5371 arch/x86/kvm/vmx/nested.c 			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
vcpu             5402 arch/x86/kvm/vmx/nested.c 		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
vcpu             5405 arch/x86/kvm/vmx/nested.c 		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
vcpu             5418 arch/x86/kvm/vmx/nested.c 		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
vcpu             5422 arch/x86/kvm/vmx/nested.c 		return nested_vmx_exit_handled_io(vcpu, vmcs12);
vcpu             5427 arch/x86/kvm/vmx/nested.c 		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
vcpu             5506 arch/x86/kvm/vmx/nested.c static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vcpu             5522 arch/x86/kvm/vmx/nested.c 	if (!vcpu)
vcpu             5525 arch/x86/kvm/vmx/nested.c 	vmx = to_vmx(vcpu);
vcpu             5526 arch/x86/kvm/vmx/nested.c 	vmcs12 = get_vmcs12(vcpu);
vcpu             5528 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_allowed(vcpu) &&
vcpu             5533 arch/x86/kvm/vmx/nested.c 		if (vmx_has_valid_vmcs12(vcpu)) {
vcpu             5539 arch/x86/kvm/vmx/nested.c 			if (is_guest_mode(vcpu) &&
vcpu             5551 arch/x86/kvm/vmx/nested.c 		if (is_guest_mode(vcpu)) {
vcpu             5565 arch/x86/kvm/vmx/nested.c 	if (!vmx_has_valid_vmcs12(vcpu))
vcpu             5575 arch/x86/kvm/vmx/nested.c 	if (is_guest_mode(vcpu)) {
vcpu             5576 arch/x86/kvm/vmx/nested.c 		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
vcpu             5577 arch/x86/kvm/vmx/nested.c 		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
vcpu             5598 arch/x86/kvm/vmx/nested.c 				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
vcpu             5609 arch/x86/kvm/vmx/nested.c void vmx_leave_nested(struct kvm_vcpu *vcpu)
vcpu             5611 arch/x86/kvm/vmx/nested.c 	if (is_guest_mode(vcpu)) {
vcpu             5612 arch/x86/kvm/vmx/nested.c 		to_vmx(vcpu)->nested.nested_run_pending = 0;
vcpu             5613 arch/x86/kvm/vmx/nested.c 		nested_vmx_vmexit(vcpu, -1, 0, 0);
vcpu             5615 arch/x86/kvm/vmx/nested.c 	free_nested(vcpu);
vcpu             5618 arch/x86/kvm/vmx/nested.c static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vcpu             5622 arch/x86/kvm/vmx/nested.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5651 arch/x86/kvm/vmx/nested.c 		if (!nested_vmx_allowed(vcpu))
vcpu             5654 arch/x86/kvm/vmx/nested.c 		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
vcpu             5671 arch/x86/kvm/vmx/nested.c 	if (is_smm(vcpu) ?
vcpu             5682 arch/x86/kvm/vmx/nested.c 		(!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
vcpu             5685 arch/x86/kvm/vmx/nested.c 	vmx_leave_nested(vcpu);
vcpu             5691 arch/x86/kvm/vmx/nested.c 	ret = enter_vmx_operation(vcpu);
vcpu             5701 arch/x86/kvm/vmx/nested.c 		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
vcpu             5723 arch/x86/kvm/vmx/nested.c 	vmcs12 = get_vmcs12(vcpu);
vcpu             5739 arch/x86/kvm/vmx/nested.c 		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
vcpu             5758 arch/x86/kvm/vmx/nested.c 	if (nested_vmx_check_controls(vcpu, vmcs12) ||
vcpu             5759 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_host_state(vcpu, vmcs12) ||
vcpu             5760 arch/x86/kvm/vmx/nested.c 	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
vcpu             5764 arch/x86/kvm/vmx/nested.c 	ret = nested_vmx_enter_non_root_mode(vcpu, false);
vcpu               19 arch/x86/kvm/vmx/nested.h void vmx_leave_nested(struct kvm_vcpu *vcpu);
vcpu               24 arch/x86/kvm/vmx/nested.h void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
vcpu               25 arch/x86/kvm/vmx/nested.h enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
vcpu               27 arch/x86/kvm/vmx/nested.h bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
vcpu               28 arch/x86/kvm/vmx/nested.h void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vcpu               30 arch/x86/kvm/vmx/nested.h void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
vcpu               31 arch/x86/kvm/vmx/nested.h int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
vcpu               33 arch/x86/kvm/vmx/nested.h int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
vcpu               35 arch/x86/kvm/vmx/nested.h bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
vcpu               38 arch/x86/kvm/vmx/nested.h static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
vcpu               40 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.cached_vmcs12;
vcpu               43 arch/x86/kvm/vmx/nested.h static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
vcpu               45 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
vcpu               48 arch/x86/kvm/vmx/nested.h static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
vcpu               50 arch/x86/kvm/vmx/nested.h 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu               58 arch/x86/kvm/vmx/nested.h 	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
vcpu               62 arch/x86/kvm/vmx/nested.h static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
vcpu               65 arch/x86/kvm/vmx/nested.h 	return get_vmcs12(vcpu)->ept_pointer;
vcpu               68 arch/x86/kvm/vmx/nested.h static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
vcpu               70 arch/x86/kvm/vmx/nested.h 	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
vcpu               76 arch/x86/kvm/vmx/nested.h static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
vcpu               90 arch/x86/kvm/vmx/nested.h 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu               96 arch/x86/kvm/vmx/nested.h 	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
vcpu              117 arch/x86/kvm/vmx/nested.h static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
vcpu              119 arch/x86/kvm/vmx/nested.h 	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
vcpu              127 arch/x86/kvm/vmx/nested.h static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
vcpu              129 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.msrs.misc_low &
vcpu              133 arch/x86/kvm/vmx/nested.h static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
vcpu              135 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
vcpu              138 arch/x86/kvm/vmx/nested.h static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
vcpu              140 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
vcpu              144 arch/x86/kvm/vmx/nested.h static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
vcpu              146 arch/x86/kvm/vmx/nested.h 	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
vcpu              245 arch/x86/kvm/vmx/nested.h static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
vcpu              247 arch/x86/kvm/vmx/nested.h 	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
vcpu              260 arch/x86/kvm/vmx/nested.h static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
vcpu              262 arch/x86/kvm/vmx/nested.h 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
vcpu              263 arch/x86/kvm/vmx/nested.h 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
vcpu              264 arch/x86/kvm/vmx/nested.h 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu              266 arch/x86/kvm/vmx/nested.h 	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
vcpu              274 arch/x86/kvm/vmx/nested.h static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
vcpu              276 arch/x86/kvm/vmx/nested.h 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
vcpu              277 arch/x86/kvm/vmx/nested.h 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
vcpu              282 arch/x86/kvm/vmx/nested.h static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
vcpu              284 arch/x86/kvm/vmx/nested.h 	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
vcpu              285 arch/x86/kvm/vmx/nested.h 	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
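The nested_guest_cr0_valid()/nested_host_cr0_valid()/nested_cr4_valid() lines above all come down to the same test against the IA32_VMX_CRx_FIXED0/FIXED1 MSR pair: every bit set in FIXED0 must be set in the value, and no bit clear in FIXED1 may be set (nested_guest_cr0_valid additionally relaxes CR0.PE/PG when unrestricted guest is exposed to L1). A sketch of the core check:

	/* Sketch of the FIXED0/FIXED1 test behind the nested CR0/CR4 validity helpers. */
	static int sketch_cr_fixed_bits_ok(unsigned long long val,
					   unsigned long long fixed0,   /* bits that must be 1  */
					   unsigned long long fixed1)   /* bits allowed to be 1 */
	{
		return ((val & fixed0) == fixed0) &&    /* all mandatory bits present       */
		       ((val & fixed1) == val);         /* nothing set outside the allowed  */
	}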
vcpu              118 arch/x86/kvm/vmx/pmu_intel.c static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
vcpu              120 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              129 arch/x86/kvm/vmx/pmu_intel.c static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
vcpu              132 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              151 arch/x86/kvm/vmx/pmu_intel.c static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
vcpu              153 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              173 arch/x86/kvm/vmx/pmu_intel.c static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
vcpu              175 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              209 arch/x86/kvm/vmx/pmu_intel.c static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu              211 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              270 arch/x86/kvm/vmx/pmu_intel.c static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
vcpu              272 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              285 arch/x86/kvm/vmx/pmu_intel.c 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
vcpu              323 arch/x86/kvm/vmx/pmu_intel.c 	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
vcpu              330 arch/x86/kvm/vmx/pmu_intel.c static void intel_pmu_init(struct kvm_vcpu *vcpu)
vcpu              333 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              337 arch/x86/kvm/vmx/pmu_intel.c 		pmu->gp_counters[i].vcpu = vcpu;
vcpu              343 arch/x86/kvm/vmx/pmu_intel.c 		pmu->fixed_counters[i].vcpu = vcpu;
vcpu              348 arch/x86/kvm/vmx/pmu_intel.c static void intel_pmu_reset(struct kvm_vcpu *vcpu)
vcpu              350 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
vcpu              150 arch/x86/kvm/vmx/vmx.c 	(~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f)
vcpu              344 arch/x86/kvm/vmx/vmx.c static bool guest_state_valid(struct kvm_vcpu *vcpu);
vcpu              462 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu;
vcpu              466 arch/x86/kvm/vmx/vmx.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              468 arch/x86/kvm/vmx/vmx.c 			tmp_eptp = to_vmx(vcpu)->ept_pointer;
vcpu              469 arch/x86/kvm/vmx/vmx.c 		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
vcpu              489 arch/x86/kvm/vmx/vmx.c 		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
vcpu              491 arch/x86/kvm/vmx/vmx.c 	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
vcpu              508 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu;
vcpu              517 arch/x86/kvm/vmx/vmx.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              519 arch/x86/kvm/vmx/vmx.c 			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
vcpu              521 arch/x86/kvm/vmx/vmx.c 					kvm, vcpu, range);
vcpu              536 arch/x86/kvm/vmx/vmx.c static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
vcpu              540 arch/x86/kvm/vmx/vmx.c 			&vcpu->kvm->arch.hyperv.hv_pa_pg;
vcpu              551 arch/x86/kvm/vmx/vmx.c 	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;
vcpu              555 arch/x86/kvm/vmx/vmx.c 	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
vcpu              611 arch/x86/kvm/vmx/vmx.c static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
vcpu              613 arch/x86/kvm/vmx/vmx.c 	return flexpriority_enabled && lapic_in_kernel(vcpu);
vcpu              706 arch/x86/kvm/vmx/vmx.c 	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
vcpu              707 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
vcpu              751 arch/x86/kvm/vmx/vmx.c void update_exception_bitmap(struct kvm_vcpu *vcpu)
vcpu              765 arch/x86/kvm/vmx/vmx.c 	if ((vcpu->guest_debug &
vcpu              769 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->rmode.vm86_active)
vcpu              779 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu))
vcpu              780 arch/x86/kvm/vmx/vmx.c 		eb |= get_vmcs12(vcpu)->exception_bitmap;
vcpu              788 arch/x86/kvm/vmx/vmx.c static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
vcpu              796 arch/x86/kvm/vmx/vmx.c 	msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
vcpu              946 arch/x86/kvm/vmx/vmx.c 	u64 guest_efer = vmx->vcpu.arch.efer;
vcpu              970 arch/x86/kvm/vmx/vmx.c 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
vcpu             1107 arch/x86/kvm/vmx/vmx.c void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
vcpu             1109 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1183 arch/x86/kvm/vmx/vmx.c 	++vmx->vcpu.stat.host_state_reload;
vcpu             1233 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             1235 arch/x86/kvm/vmx/vmx.c 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             1245 arch/x86/kvm/vmx/vmx.c 	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
vcpu             1255 arch/x86/kvm/vmx/vmx.c 	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
vcpu             1289 arch/x86/kvm/vmx/vmx.c void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
vcpu             1292 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1331 arch/x86/kvm/vmx/vmx.c 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             1357 arch/x86/kvm/vmx/vmx.c 	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
vcpu             1365 arch/x86/kvm/vmx/vmx.c void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             1367 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1369 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vcpu             1371 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_pi_load(vcpu, cpu);
vcpu             1376 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
vcpu             1378 arch/x86/kvm/vmx/vmx.c 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             1380 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
vcpu             1382 arch/x86/kvm/vmx/vmx.c 		!kvm_vcpu_apicv_active(vcpu))
vcpu             1386 arch/x86/kvm/vmx/vmx.c 	if (vcpu->preempted)
vcpu             1390 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             1392 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_pi_put(vcpu);
vcpu             1394 arch/x86/kvm/vmx/vmx.c 	vmx_prepare_switch_to_host(to_vmx(vcpu));
vcpu             1397 arch/x86/kvm/vmx/vmx.c static bool emulation_required(struct kvm_vcpu *vcpu)
vcpu             1399 arch/x86/kvm/vmx/vmx.c 	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
vcpu             1402 arch/x86/kvm/vmx/vmx.c static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
vcpu             1404 arch/x86/kvm/vmx/vmx.c unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
vcpu             1408 arch/x86/kvm/vmx/vmx.c 	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
vcpu             1409 arch/x86/kvm/vmx/vmx.c 		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
vcpu             1411 arch/x86/kvm/vmx/vmx.c 		if (to_vmx(vcpu)->rmode.vm86_active) {
vcpu             1413 arch/x86/kvm/vmx/vmx.c 			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
vcpu             1416 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->rflags = rflags;
vcpu             1418 arch/x86/kvm/vmx/vmx.c 	return to_vmx(vcpu)->rflags;
vcpu             1421 arch/x86/kvm/vmx/vmx.c void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vcpu             1423 arch/x86/kvm/vmx/vmx.c 	unsigned long old_rflags = vmx_get_rflags(vcpu);
vcpu             1425 arch/x86/kvm/vmx/vmx.c 	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
vcpu             1426 arch/x86/kvm/vmx/vmx.c 	to_vmx(vcpu)->rflags = rflags;
vcpu             1427 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->rmode.vm86_active) {
vcpu             1428 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->rmode.save_rflags = rflags;
vcpu             1433 arch/x86/kvm/vmx/vmx.c 	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
vcpu             1434 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
vcpu             1437 arch/x86/kvm/vmx/vmx.c u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
vcpu             1450 arch/x86/kvm/vmx/vmx.c void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
vcpu             1466 arch/x86/kvm/vmx/vmx.c static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
vcpu             1468 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1538 arch/x86/kvm/vmx/vmx.c static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
vcpu             1551 arch/x86/kvm/vmx/vmx.c 	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
vcpu             1552 arch/x86/kvm/vmx/vmx.c 		rip = kvm_rip_read(vcpu);
vcpu             1554 arch/x86/kvm/vmx/vmx.c 		kvm_rip_write(vcpu, rip);
vcpu             1556 arch/x86/kvm/vmx/vmx.c 		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
vcpu             1561 arch/x86/kvm/vmx/vmx.c 	vmx_set_interrupt_shadow(vcpu, 0);
vcpu             1566 arch/x86/kvm/vmx/vmx.c static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
vcpu             1574 arch/x86/kvm/vmx/vmx.c 	if (kvm_hlt_in_guest(vcpu->kvm) &&
vcpu             1579 arch/x86/kvm/vmx/vmx.c static void vmx_queue_exception(struct kvm_vcpu *vcpu)
vcpu             1581 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1582 arch/x86/kvm/vmx/vmx.c 	unsigned nr = vcpu->arch.exception.nr;
vcpu             1583 arch/x86/kvm/vmx/vmx.c 	bool has_error_code = vcpu->arch.exception.has_error_code;
vcpu             1584 arch/x86/kvm/vmx/vmx.c 	u32 error_code = vcpu->arch.exception.error_code;
vcpu             1587 arch/x86/kvm/vmx/vmx.c 	kvm_deliver_exception_payload(vcpu);
vcpu             1597 arch/x86/kvm/vmx/vmx.c 			inc_eip = vcpu->arch.event_exit_inst_len;
vcpu             1598 arch/x86/kvm/vmx/vmx.c 		kvm_inject_realmode_interrupt(vcpu, nr, inc_eip);
vcpu             1606 arch/x86/kvm/vmx/vmx.c 			     vmx->vcpu.arch.event_exit_inst_len);
vcpu             1613 arch/x86/kvm/vmx/vmx.c 	vmx_clear_hlt(vcpu);
vcpu             1653 arch/x86/kvm/vmx/vmx.c 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
vcpu             1669 arch/x86/kvm/vmx/vmx.c 	if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
vcpu             1676 arch/x86/kvm/vmx/vmx.c 		vmx_update_msr_bitmap(&vmx->vcpu);
vcpu             1679 arch/x86/kvm/vmx/vmx.c static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
vcpu             1681 arch/x86/kvm/vmx/vmx.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             1683 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) &&
vcpu             1685 arch/x86/kvm/vmx/vmx.c 		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
vcpu             1687 arch/x86/kvm/vmx/vmx.c 	return vcpu->arch.tsc_offset;
vcpu             1690 arch/x86/kvm/vmx/vmx.c static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vcpu             1692 arch/x86/kvm/vmx/vmx.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             1701 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) &&
vcpu             1705 arch/x86/kvm/vmx/vmx.c 	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
vcpu             1706 arch/x86/kvm/vmx/vmx.c 				   vcpu->arch.tsc_offset - g_tsc_offset,
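The vmx_read_l1_tsc_offset()/vmx_write_l1_tsc_offset() lines, together with the += and -= of vmcs12->tsc_offset around nested entry and exit earlier in nested.c, keep vcpu->arch.tsc_offset holding the combined L0-to-L2 offset while L2 runs. For example, if L0 gives L1 a TSC offset of 500 and L1 programs an offset of 1000 for L2, then while L2 runs vcpu->arch.tsc_offset is 1500, and the read helper recovers L1's 500 by subtracting vmcs12->tsc_offset.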
vcpu             1718 arch/x86/kvm/vmx/vmx.c bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
vcpu             1720 arch/x86/kvm/vmx/vmx.c 	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
vcpu             1723 arch/x86/kvm/vmx/vmx.c static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
vcpu             1726 arch/x86/kvm/vmx/vmx.c 	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
vcpu             1750 arch/x86/kvm/vmx/vmx.c static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             1752 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1769 arch/x86/kvm/vmx/vmx.c 		return kvm_get_msr_common(vcpu, msr_info);
vcpu             1778 arch/x86/kvm/vmx/vmx.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
vcpu             1781 arch/x86/kvm/vmx/vmx.c 		msr_info->data = to_vmx(vcpu)->spec_ctrl;
vcpu             1795 arch/x86/kvm/vmx/vmx.c 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
vcpu             1804 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vcpu->arch.mcg_ext_ctl;
vcpu             1810 arch/x86/kvm/vmx/vmx.c 		if (!nested_vmx_allowed(vcpu))
vcpu             1817 arch/x86/kvm/vmx/vmx.c 		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
vcpu             1818 arch/x86/kvm/vmx/vmx.c 		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
vcpu             1820 arch/x86/kvm/vmx/vmx.c 		msr_info->data = vcpu->arch.ia32_xss;
vcpu             1870 arch/x86/kvm/vmx/vmx.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
vcpu             1879 arch/x86/kvm/vmx/vmx.c 		return kvm_get_msr_common(vcpu, msr_info);
vcpu             1890 arch/x86/kvm/vmx/vmx.c static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             1892 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             1901 arch/x86/kvm/vmx/vmx.c 		ret = kvm_set_msr_common(vcpu, msr_info);
vcpu             1917 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu))
vcpu             1918 arch/x86/kvm/vmx/vmx.c 			get_vmcs12(vcpu)->guest_sysenter_cs = data;
vcpu             1922 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu))
vcpu             1923 arch/x86/kvm/vmx/vmx.c 			get_vmcs12(vcpu)->guest_sysenter_eip = data;
vcpu             1927 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu))
vcpu             1928 arch/x86/kvm/vmx/vmx.c 			get_vmcs12(vcpu)->guest_sysenter_esp = data;
vcpu             1932 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls &
vcpu             1934 arch/x86/kvm/vmx/vmx.c 			get_vmcs12(vcpu)->guest_ia32_debugctl = data;
vcpu             1936 arch/x86/kvm/vmx/vmx.c 		ret = kvm_set_msr_common(vcpu, msr_info);
vcpu             1942 arch/x86/kvm/vmx/vmx.c 		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
vcpu             1944 arch/x86/kvm/vmx/vmx.c 		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
vcpu             1961 arch/x86/kvm/vmx/vmx.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
vcpu             1991 arch/x86/kvm/vmx/vmx.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
vcpu             2020 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu) &&
vcpu             2021 arch/x86/kvm/vmx/vmx.c 		    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
vcpu             2022 arch/x86/kvm/vmx/vmx.c 			get_vmcs12(vcpu)->guest_ia32_pat = data;
vcpu             2026 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.pat = data;
vcpu             2029 arch/x86/kvm/vmx/vmx.c 		ret = kvm_set_msr_common(vcpu, msr_info);
vcpu             2032 arch/x86/kvm/vmx/vmx.c 		ret = kvm_set_msr_common(vcpu, msr_info);
vcpu             2036 arch/x86/kvm/vmx/vmx.c 		     !(to_vmx(vcpu)->msr_ia32_feature_control &
vcpu             2040 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.mcg_ext_ctl = data;
vcpu             2043 arch/x86/kvm/vmx/vmx.c 		if (!vmx_feature_control_msr_valid(vcpu, data) ||
vcpu             2044 arch/x86/kvm/vmx/vmx.c 		    (to_vmx(vcpu)->msr_ia32_feature_control &
vcpu             2049 arch/x86/kvm/vmx/vmx.c 			vmx_leave_nested(vcpu);
vcpu             2054 arch/x86/kvm/vmx/vmx.c 		if (!nested_vmx_allowed(vcpu))
vcpu             2056 arch/x86/kvm/vmx/vmx.c 		return vmx_set_vmx_msr(vcpu, msr_index, data);
vcpu             2060 arch/x86/kvm/vmx/vmx.c 		     !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
vcpu             2061 arch/x86/kvm/vmx/vmx.c 		       guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
vcpu             2069 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.ia32_xss = data;
vcpu             2070 arch/x86/kvm/vmx/vmx.c 		if (vcpu->arch.ia32_xss != host_xss)
vcpu             2072 arch/x86/kvm/vmx/vmx.c 				vcpu->arch.ia32_xss, host_xss, false);
vcpu             2078 arch/x86/kvm/vmx/vmx.c 			vmx_rtit_ctl_check(vcpu, data) ||
vcpu             2128 arch/x86/kvm/vmx/vmx.c 		if (is_noncanonical_address(data, vcpu))
vcpu             2137 arch/x86/kvm/vmx/vmx.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
vcpu             2158 arch/x86/kvm/vmx/vmx.c 		ret = kvm_set_msr_common(vcpu, msr_info);
vcpu             2164 arch/x86/kvm/vmx/vmx.c static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
vcpu             2166 arch/x86/kvm/vmx/vmx.c 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
vcpu             2169 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
vcpu             2172 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
vcpu             2176 arch/x86/kvm/vmx/vmx.c 			ept_save_pdptrs(vcpu);
vcpu             2624 arch/x86/kvm/vmx/vmx.c static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
vcpu             2640 arch/x86/kvm/vmx/vmx.c 	vmx_set_segment(vcpu, save, seg);
vcpu             2643 arch/x86/kvm/vmx/vmx.c static void enter_pmode(struct kvm_vcpu *vcpu)
vcpu             2646 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2652 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vcpu             2653 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vcpu             2654 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vcpu             2655 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vcpu             2656 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vcpu             2657 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vcpu             2663 arch/x86/kvm/vmx/vmx.c 	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vcpu             2673 arch/x86/kvm/vmx/vmx.c 	update_exception_bitmap(vcpu);
vcpu             2675 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
vcpu             2676 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
vcpu             2677 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
vcpu             2678 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
vcpu             2679 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
vcpu             2680 arch/x86/kvm/vmx/vmx.c 	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
vcpu             2716 arch/x86/kvm/vmx/vmx.c static void enter_rmode(struct kvm_vcpu *vcpu)
vcpu             2719 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2720 arch/x86/kvm/vmx/vmx.c 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
vcpu             2722 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vcpu             2723 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vcpu             2724 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vcpu             2725 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vcpu             2726 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vcpu             2727 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vcpu             2728 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vcpu             2753 arch/x86/kvm/vmx/vmx.c 	update_exception_bitmap(vcpu);
vcpu             2762 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_reset_context(vcpu);
vcpu             2765 arch/x86/kvm/vmx/vmx.c void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu             2767 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2773 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.efer = efer;
vcpu             2775 arch/x86/kvm/vmx/vmx.c 		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vcpu             2778 arch/x86/kvm/vmx/vmx.c 		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vcpu             2787 arch/x86/kvm/vmx/vmx.c static void enter_lmode(struct kvm_vcpu *vcpu)
vcpu             2791 arch/x86/kvm/vmx/vmx.c 	vmx_segment_cache_clear(to_vmx(vcpu));
vcpu             2801 arch/x86/kvm/vmx/vmx.c 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
vcpu             2804 arch/x86/kvm/vmx/vmx.c static void exit_lmode(struct kvm_vcpu *vcpu)
vcpu             2806 arch/x86/kvm/vmx/vmx.c 	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vcpu             2807 arch/x86/kvm/vmx/vmx.c 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
vcpu             2812 arch/x86/kvm/vmx/vmx.c static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
vcpu             2814 arch/x86/kvm/vmx/vmx.c 	int vpid = to_vmx(vcpu)->vpid;
vcpu             2826 arch/x86/kvm/vmx/vmx.c static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
vcpu             2828 arch/x86/kvm/vmx/vmx.c 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
vcpu             2830 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
vcpu             2831 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
vcpu             2834 arch/x86/kvm/vmx/vmx.c static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
vcpu             2836 arch/x86/kvm/vmx/vmx.c 	if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
vcpu             2837 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
vcpu             2838 arch/x86/kvm/vmx/vmx.c 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu             2841 arch/x86/kvm/vmx/vmx.c static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
vcpu             2843 arch/x86/kvm/vmx/vmx.c 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
vcpu             2845 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
vcpu             2846 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
vcpu             2849 arch/x86/kvm/vmx/vmx.c static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
vcpu             2851 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
vcpu             2854 arch/x86/kvm/vmx/vmx.c 		      (unsigned long *)&vcpu->arch.regs_dirty))
vcpu             2857 arch/x86/kvm/vmx/vmx.c 	if (is_pae_paging(vcpu)) {
vcpu             2865 arch/x86/kvm/vmx/vmx.c void ept_save_pdptrs(struct kvm_vcpu *vcpu)
vcpu             2867 arch/x86/kvm/vmx/vmx.c 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
vcpu             2869 arch/x86/kvm/vmx/vmx.c 	if (is_pae_paging(vcpu)) {
vcpu             2877 arch/x86/kvm/vmx/vmx.c 		  (unsigned long *)&vcpu->arch.regs_avail);
vcpu             2879 arch/x86/kvm/vmx/vmx.c 		  (unsigned long *)&vcpu->arch.regs_dirty);
vcpu             2884 arch/x86/kvm/vmx/vmx.c 					struct kvm_vcpu *vcpu)
vcpu             2886 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2888 arch/x86/kvm/vmx/vmx.c 	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
vcpu             2889 arch/x86/kvm/vmx/vmx.c 		vmx_decache_cr3(vcpu);
vcpu             2894 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.cr0 = cr0;
vcpu             2895 arch/x86/kvm/vmx/vmx.c 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
vcpu             2896 arch/x86/kvm/vmx/vmx.c 	} else if (!is_paging(vcpu)) {
vcpu             2900 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.cr0 = cr0;
vcpu             2901 arch/x86/kvm/vmx/vmx.c 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
vcpu             2908 arch/x86/kvm/vmx/vmx.c void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu             2910 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             2920 arch/x86/kvm/vmx/vmx.c 			enter_pmode(vcpu);
vcpu             2923 arch/x86/kvm/vmx/vmx.c 			enter_rmode(vcpu);
vcpu             2927 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.efer & EFER_LME) {
vcpu             2928 arch/x86/kvm/vmx/vmx.c 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
vcpu             2929 arch/x86/kvm/vmx/vmx.c 			enter_lmode(vcpu);
vcpu             2930 arch/x86/kvm/vmx/vmx.c 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
vcpu             2931 arch/x86/kvm/vmx/vmx.c 			exit_lmode(vcpu);
vcpu             2936 arch/x86/kvm/vmx/vmx.c 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
vcpu             2940 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr0 = cr0;
vcpu             2943 arch/x86/kvm/vmx/vmx.c 	vmx->emulation_required = emulation_required(vcpu);
vcpu             2946 arch/x86/kvm/vmx/vmx.c static int get_ept_level(struct kvm_vcpu *vcpu)
vcpu             2949 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
vcpu             2951 arch/x86/kvm/vmx/vmx.c 	if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
vcpu             2956 arch/x86/kvm/vmx/vmx.c u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
vcpu             2960 arch/x86/kvm/vmx/vmx.c 	eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
vcpu             2963 arch/x86/kvm/vmx/vmx.c 	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
vcpu             2970 arch/x86/kvm/vmx/vmx.c void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu             2972 arch/x86/kvm/vmx/vmx.c 	struct kvm *kvm = vcpu->kvm;
vcpu             2979 arch/x86/kvm/vmx/vmx.c 		eptp = construct_eptp(vcpu, cr3);
vcpu             2984 arch/x86/kvm/vmx/vmx.c 			to_vmx(vcpu)->ept_pointer = eptp;
vcpu             2991 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu))
vcpu             2993 arch/x86/kvm/vmx/vmx.c 		else if (enable_unrestricted_guest || is_paging(vcpu))
vcpu             2994 arch/x86/kvm/vmx/vmx.c 			guest_cr3 = kvm_read_cr3(vcpu);
vcpu             2997 arch/x86/kvm/vmx/vmx.c 		ept_load_pdptrs(vcpu);
vcpu             3004 arch/x86/kvm/vmx/vmx.c int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu             3006 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3026 arch/x86/kvm/vmx/vmx.c 		} else if (!is_guest_mode(vcpu) ||
vcpu             3027 arch/x86/kvm/vmx/vmx.c 			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) {
vcpu             3040 arch/x86/kvm/vmx/vmx.c 		if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
vcpu             3044 arch/x86/kvm/vmx/vmx.c 	if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
vcpu             3047 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr4 = cr4;
vcpu             3051 arch/x86/kvm/vmx/vmx.c 			if (!is_paging(vcpu)) {
vcpu             3070 arch/x86/kvm/vmx/vmx.c 		if (!is_paging(vcpu))
vcpu             3079 arch/x86/kvm/vmx/vmx.c void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
vcpu             3081 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3115 arch/x86/kvm/vmx/vmx.c static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
vcpu             3119 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->rmode.vm86_active) {
vcpu             3120 arch/x86/kvm/vmx/vmx.c 		vmx_get_segment(vcpu, &s, seg);
vcpu             3123 arch/x86/kvm/vmx/vmx.c 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
vcpu             3126 arch/x86/kvm/vmx/vmx.c int vmx_get_cpl(struct kvm_vcpu *vcpu)
vcpu             3128 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3158 arch/x86/kvm/vmx/vmx.c void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
vcpu             3160 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3195 arch/x86/kvm/vmx/vmx.c 	vmx->emulation_required = emulation_required(vcpu);
vcpu             3198 arch/x86/kvm/vmx/vmx.c static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
vcpu             3200 arch/x86/kvm/vmx/vmx.c 	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
vcpu             3206 arch/x86/kvm/vmx/vmx.c static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             3212 arch/x86/kvm/vmx/vmx.c static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             3218 arch/x86/kvm/vmx/vmx.c static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             3224 arch/x86/kvm/vmx/vmx.c static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vcpu             3230 arch/x86/kvm/vmx/vmx.c static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
vcpu             3235 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &var, seg);
vcpu             3251 arch/x86/kvm/vmx/vmx.c static bool code_segment_valid(struct kvm_vcpu *vcpu)
vcpu             3256 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             3279 arch/x86/kvm/vmx/vmx.c static bool stack_segment_valid(struct kvm_vcpu *vcpu)
vcpu             3284 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
vcpu             3301 arch/x86/kvm/vmx/vmx.c static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
vcpu             3306 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &var, seg);
vcpu             3326 arch/x86/kvm/vmx/vmx.c static bool tr_valid(struct kvm_vcpu *vcpu)
vcpu             3330 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
vcpu             3344 arch/x86/kvm/vmx/vmx.c static bool ldtr_valid(struct kvm_vcpu *vcpu)
vcpu             3348 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
vcpu             3362 arch/x86/kvm/vmx/vmx.c static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
vcpu             3366 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             3367 arch/x86/kvm/vmx/vmx.c 	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
vcpu             3378 arch/x86/kvm/vmx/vmx.c static bool guest_state_valid(struct kvm_vcpu *vcpu)
vcpu             3384 arch/x86/kvm/vmx/vmx.c 	if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
vcpu             3385 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
vcpu             3387 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
vcpu             3389 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
vcpu             3391 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
vcpu             3393 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
vcpu             3395 arch/x86/kvm/vmx/vmx.c 		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
vcpu             3399 arch/x86/kvm/vmx/vmx.c 		if (!cs_ss_rpl_check(vcpu))
vcpu             3401 arch/x86/kvm/vmx/vmx.c 		if (!code_segment_valid(vcpu))
vcpu             3403 arch/x86/kvm/vmx/vmx.c 		if (!stack_segment_valid(vcpu))
vcpu             3405 arch/x86/kvm/vmx/vmx.c 		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
vcpu             3407 arch/x86/kvm/vmx/vmx.c 		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
vcpu             3409 arch/x86/kvm/vmx/vmx.c 		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
vcpu             3411 arch/x86/kvm/vmx/vmx.c 		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
vcpu             3413 arch/x86/kvm/vmx/vmx.c 		if (!tr_valid(vcpu))
vcpu             3415 arch/x86/kvm/vmx/vmx.c 		if (!ldtr_valid(vcpu))
vcpu             3657 arch/x86/kvm/vmx/vmx.c static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
vcpu             3662 arch/x86/kvm/vmx/vmx.c 	    (secondary_exec_controls_get(to_vmx(vcpu)) &
vcpu             3665 arch/x86/kvm/vmx/vmx.c 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
vcpu             3697 arch/x86/kvm/vmx/vmx.c void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
vcpu             3699 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3701 arch/x86/kvm/vmx/vmx.c 	u8 mode = vmx_msr_bitmap_mode(vcpu);
vcpu             3735 arch/x86/kvm/vmx/vmx.c static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
vcpu             3740 arch/x86/kvm/vmx/vmx.c static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
vcpu             3742 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3747 arch/x86/kvm/vmx/vmx.c 	if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
vcpu             3748 arch/x86/kvm/vmx/vmx.c 		!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
vcpu             3760 arch/x86/kvm/vmx/vmx.c static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
vcpu             3766 arch/x86/kvm/vmx/vmx.c 	if (vcpu->mode == IN_GUEST_MODE) {
vcpu             3792 arch/x86/kvm/vmx/vmx.c 		apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
vcpu             3799 arch/x86/kvm/vmx/vmx.c static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
vcpu             3802 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3804 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) &&
vcpu             3811 arch/x86/kvm/vmx/vmx.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             3813 arch/x86/kvm/vmx/vmx.c 		if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
vcpu             3814 arch/x86/kvm/vmx/vmx.c 			kvm_vcpu_kick(vcpu);
vcpu             3826 arch/x86/kvm/vmx/vmx.c static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
vcpu             3828 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3831 arch/x86/kvm/vmx/vmx.c 	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
vcpu             3835 arch/x86/kvm/vmx/vmx.c 	if (!vcpu->arch.apicv_active)
vcpu             3845 arch/x86/kvm/vmx/vmx.c 	if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
vcpu             3846 arch/x86/kvm/vmx/vmx.c 		kvm_vcpu_kick(vcpu);
vcpu             3916 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
vcpu             3918 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vcpu             3919 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(&vmx->vcpu))
vcpu             3920 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.cr4_guest_owned_bits &=
vcpu             3921 arch/x86/kvm/vmx/vmx.c 			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
vcpu             3922 arch/x86/kvm/vmx/vmx.c 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
vcpu             3929 arch/x86/kvm/vmx/vmx.c 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
vcpu             3941 arch/x86/kvm/vmx/vmx.c static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
vcpu             3943 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             3947 arch/x86/kvm/vmx/vmx.c 		if (kvm_vcpu_apicv_active(vcpu))
vcpu             3958 arch/x86/kvm/vmx/vmx.c 		vmx_update_msr_bitmap(vcpu);
vcpu             3965 arch/x86/kvm/vmx/vmx.c 	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
vcpu             3968 arch/x86/kvm/vmx/vmx.c 	if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
vcpu             3979 arch/x86/kvm/vmx/vmx.c 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
vcpu             3982 arch/x86/kvm/vmx/vmx.c 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
vcpu             3990 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu = &vmx->vcpu;
vcpu             3996 arch/x86/kvm/vmx/vmx.c 	if (!cpu_need_virtualize_apic_accesses(vcpu))
vcpu             4006 arch/x86/kvm/vmx/vmx.c 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
vcpu             4008 arch/x86/kvm/vmx/vmx.c 	if (!kvm_vcpu_apicv_active(vcpu))
vcpu             4030 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
vcpu             4031 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
vcpu             4047 arch/x86/kvm/vmx/vmx.c 		bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
vcpu             4064 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
vcpu             4065 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_PCID);
vcpu             4069 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
vcpu             4083 arch/x86/kvm/vmx/vmx.c 		bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
vcpu             4098 arch/x86/kvm/vmx/vmx.c 		bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
vcpu             4114 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_WAITPKG);
vcpu             4170 arch/x86/kvm/vmx/vmx.c 	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
vcpu             4182 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
vcpu             4208 arch/x86/kvm/vmx/vmx.c 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
vcpu             4230 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
vcpu             4254 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu             4256 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4265 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.microcode_version = 0x100000000ULL;
vcpu             4266 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
vcpu             4268 arch/x86/kvm/vmx/vmx.c 	kvm_set_cr8(vcpu, 0);
vcpu             4273 arch/x86/kvm/vmx/vmx.c 		if (kvm_vcpu_is_reset_bsp(vcpu))
vcpu             4276 arch/x86/kvm/vmx/vmx.c 		kvm_set_apic_base(vcpu, &apic_base_msr);
vcpu             4308 arch/x86/kvm/vmx/vmx.c 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
vcpu             4309 arch/x86/kvm/vmx/vmx.c 	kvm_rip_write(vcpu, 0xfff0);
vcpu             4329 arch/x86/kvm/vmx/vmx.c 		if (cpu_need_tpr_shadow(vcpu))
vcpu             4331 arch/x86/kvm/vmx/vmx.c 				     __pa(vcpu->arch.apic->regs));
vcpu             4335 arch/x86/kvm/vmx/vmx.c 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
vcpu             4341 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.cr0 = cr0;
vcpu             4342 arch/x86/kvm/vmx/vmx.c 	vmx_set_cr0(vcpu, cr0); /* enter rmode */
vcpu             4343 arch/x86/kvm/vmx/vmx.c 	vmx_set_cr4(vcpu, 0);
vcpu             4344 arch/x86/kvm/vmx/vmx.c 	vmx_set_efer(vcpu, 0);
vcpu             4346 arch/x86/kvm/vmx/vmx.c 	update_exception_bitmap(vcpu);
vcpu             4350 arch/x86/kvm/vmx/vmx.c 		vmx_clear_hlt(vcpu);
vcpu             4353 arch/x86/kvm/vmx/vmx.c static void enable_irq_window(struct kvm_vcpu *vcpu)
vcpu             4355 arch/x86/kvm/vmx/vmx.c 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
vcpu             4358 arch/x86/kvm/vmx/vmx.c static void enable_nmi_window(struct kvm_vcpu *vcpu)
vcpu             4362 arch/x86/kvm/vmx/vmx.c 		enable_irq_window(vcpu);
vcpu             4366 arch/x86/kvm/vmx/vmx.c 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
vcpu             4369 arch/x86/kvm/vmx/vmx.c static void vmx_inject_irq(struct kvm_vcpu *vcpu)
vcpu             4371 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4373 arch/x86/kvm/vmx/vmx.c 	int irq = vcpu->arch.interrupt.nr;
vcpu             4377 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.irq_injections;
vcpu             4380 arch/x86/kvm/vmx/vmx.c 		if (vcpu->arch.interrupt.soft)
vcpu             4381 arch/x86/kvm/vmx/vmx.c 			inc_eip = vcpu->arch.event_exit_inst_len;
vcpu             4382 arch/x86/kvm/vmx/vmx.c 		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
vcpu             4386 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.interrupt.soft) {
vcpu             4389 arch/x86/kvm/vmx/vmx.c 			     vmx->vcpu.arch.event_exit_inst_len);
vcpu             4394 arch/x86/kvm/vmx/vmx.c 	vmx_clear_hlt(vcpu);
vcpu             4397 arch/x86/kvm/vmx/vmx.c static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
vcpu             4399 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4414 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.nmi_injections;
vcpu             4418 arch/x86/kvm/vmx/vmx.c 		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
vcpu             4425 arch/x86/kvm/vmx/vmx.c 	vmx_clear_hlt(vcpu);
vcpu             4428 arch/x86/kvm/vmx/vmx.c bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
vcpu             4430 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4442 arch/x86/kvm/vmx/vmx.c void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
vcpu             4444 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4462 arch/x86/kvm/vmx/vmx.c static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
vcpu             4464 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->nested.nested_run_pending)
vcpu             4468 arch/x86/kvm/vmx/vmx.c 	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
vcpu             4476 arch/x86/kvm/vmx/vmx.c static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
vcpu             4478 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->nested.nested_run_pending)
vcpu             4481 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
vcpu             4510 arch/x86/kvm/vmx/vmx.c static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
vcpu             4518 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
vcpu             4520 arch/x86/kvm/vmx/vmx.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
vcpu             4524 arch/x86/kvm/vmx/vmx.c 		if (vcpu->guest_debug &
vcpu             4542 arch/x86/kvm/vmx/vmx.c static int handle_rmode_exception(struct kvm_vcpu *vcpu,
vcpu             4550 arch/x86/kvm/vmx/vmx.c 		if (kvm_emulate_instruction(vcpu, 0)) {
vcpu             4551 arch/x86/kvm/vmx/vmx.c 			if (vcpu->arch.halt_request) {
vcpu             4552 arch/x86/kvm/vmx/vmx.c 				vcpu->arch.halt_request = 0;
vcpu             4553 arch/x86/kvm/vmx/vmx.c 				return kvm_vcpu_halt(vcpu);
vcpu             4565 arch/x86/kvm/vmx/vmx.c 	kvm_queue_exception(vcpu, vec);
vcpu             4588 arch/x86/kvm/vmx/vmx.c static int handle_machine_check(struct kvm_vcpu *vcpu)
vcpu             4594 arch/x86/kvm/vmx/vmx.c static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vcpu             4596 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             4597 arch/x86/kvm/vmx/vmx.c 	struct kvm_run *kvm_run = vcpu->run;
vcpu             4609 arch/x86/kvm/vmx/vmx.c 		return handle_ud(vcpu);
vcpu             4624 arch/x86/kvm/vmx/vmx.c 			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
vcpu             4627 arch/x86/kvm/vmx/vmx.c 		return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
vcpu             4637 arch/x86/kvm/vmx/vmx.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             4638 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
vcpu             4639 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.ndata = 3;
vcpu             4640 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[0] = vect_info;
vcpu             4641 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[1] = intr_info;
vcpu             4642 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[2] = error_code;
vcpu             4649 arch/x86/kvm/vmx/vmx.c 		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
vcpu             4650 arch/x86/kvm/vmx/vmx.c 		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
vcpu             4655 arch/x86/kvm/vmx/vmx.c 	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
vcpu             4656 arch/x86/kvm/vmx/vmx.c 		return handle_rmode_exception(vcpu, ex_no, error_code);
vcpu             4660 arch/x86/kvm/vmx/vmx.c 		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
vcpu             4664 arch/x86/kvm/vmx/vmx.c 		if (!(vcpu->guest_debug &
vcpu             4666 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
vcpu             4667 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
vcpu             4669 arch/x86/kvm/vmx/vmx.c 				WARN_ON(!skip_emulated_instruction(vcpu));
vcpu             4671 arch/x86/kvm/vmx/vmx.c 			kvm_queue_exception(vcpu, DB_VECTOR);
vcpu             4683 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.event_exit_inst_len =
vcpu             4686 arch/x86/kvm/vmx/vmx.c 		rip = kvm_rip_read(vcpu);
vcpu             4699 arch/x86/kvm/vmx/vmx.c static int handle_external_interrupt(struct kvm_vcpu *vcpu)
vcpu             4701 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.irq_exits;
vcpu             4705 arch/x86/kvm/vmx/vmx.c static int handle_triple_fault(struct kvm_vcpu *vcpu)
vcpu             4707 arch/x86/kvm/vmx/vmx.c 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu             4708 arch/x86/kvm/vmx/vmx.c 	vcpu->mmio_needed = 0;
vcpu             4712 arch/x86/kvm/vmx/vmx.c static int handle_io(struct kvm_vcpu *vcpu)
vcpu             4721 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.io_exits;
vcpu             4724 arch/x86/kvm/vmx/vmx.c 		return kvm_emulate_instruction(vcpu, 0);
vcpu             4730 arch/x86/kvm/vmx/vmx.c 	return kvm_fast_pio(vcpu, size, port, in);
vcpu             4734 arch/x86/kvm/vmx/vmx.c vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
vcpu             4745 arch/x86/kvm/vmx/vmx.c static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
vcpu             4747 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu)) {
vcpu             4748 arch/x86/kvm/vmx/vmx.c 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             4762 arch/x86/kvm/vmx/vmx.c 		if (!nested_guest_cr0_valid(vcpu, val))
vcpu             4765 arch/x86/kvm/vmx/vmx.c 		if (kvm_set_cr0(vcpu, val))
vcpu             4770 arch/x86/kvm/vmx/vmx.c 		if (to_vmx(vcpu)->nested.vmxon &&
vcpu             4771 arch/x86/kvm/vmx/vmx.c 		    !nested_host_cr0_valid(vcpu, val))
vcpu             4774 arch/x86/kvm/vmx/vmx.c 		return kvm_set_cr0(vcpu, val);
vcpu             4778 arch/x86/kvm/vmx/vmx.c static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
vcpu             4780 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu)) {
vcpu             4781 arch/x86/kvm/vmx/vmx.c 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             4787 arch/x86/kvm/vmx/vmx.c 		if (kvm_set_cr4(vcpu, val))
vcpu             4792 arch/x86/kvm/vmx/vmx.c 		return kvm_set_cr4(vcpu, val);
vcpu             4795 arch/x86/kvm/vmx/vmx.c static int handle_desc(struct kvm_vcpu *vcpu)
vcpu             4797 arch/x86/kvm/vmx/vmx.c 	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
vcpu             4798 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_instruction(vcpu, 0);
vcpu             4801 arch/x86/kvm/vmx/vmx.c static int handle_cr(struct kvm_vcpu *vcpu)
vcpu             4814 arch/x86/kvm/vmx/vmx.c 		val = kvm_register_readl(vcpu, reg);
vcpu             4818 arch/x86/kvm/vmx/vmx.c 			err = handle_set_cr0(vcpu, val);
vcpu             4819 arch/x86/kvm/vmx/vmx.c 			return kvm_complete_insn_gp(vcpu, err);
vcpu             4822 arch/x86/kvm/vmx/vmx.c 			err = kvm_set_cr3(vcpu, val);
vcpu             4823 arch/x86/kvm/vmx/vmx.c 			return kvm_complete_insn_gp(vcpu, err);
vcpu             4825 arch/x86/kvm/vmx/vmx.c 			err = handle_set_cr4(vcpu, val);
vcpu             4826 arch/x86/kvm/vmx/vmx.c 			return kvm_complete_insn_gp(vcpu, err);
vcpu             4828 arch/x86/kvm/vmx/vmx.c 				u8 cr8_prev = kvm_get_cr8(vcpu);
vcpu             4830 arch/x86/kvm/vmx/vmx.c 				err = kvm_set_cr8(vcpu, cr8);
vcpu             4831 arch/x86/kvm/vmx/vmx.c 				ret = kvm_complete_insn_gp(vcpu, err);
vcpu             4832 arch/x86/kvm/vmx/vmx.c 				if (lapic_in_kernel(vcpu))
vcpu             4841 arch/x86/kvm/vmx/vmx.c 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
vcpu             4848 arch/x86/kvm/vmx/vmx.c 		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
vcpu             4849 arch/x86/kvm/vmx/vmx.c 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
vcpu             4850 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             4855 arch/x86/kvm/vmx/vmx.c 			val = kvm_read_cr3(vcpu);
vcpu             4856 arch/x86/kvm/vmx/vmx.c 			kvm_register_write(vcpu, reg, val);
vcpu             4858 arch/x86/kvm/vmx/vmx.c 			return kvm_skip_emulated_instruction(vcpu);
vcpu             4860 arch/x86/kvm/vmx/vmx.c 			val = kvm_get_cr8(vcpu);
vcpu             4861 arch/x86/kvm/vmx/vmx.c 			kvm_register_write(vcpu, reg, val);
vcpu             4863 arch/x86/kvm/vmx/vmx.c 			return kvm_skip_emulated_instruction(vcpu);
vcpu             4868 arch/x86/kvm/vmx/vmx.c 		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
vcpu             4869 arch/x86/kvm/vmx/vmx.c 		kvm_lmsw(vcpu, val);
vcpu             4871 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             4875 arch/x86/kvm/vmx/vmx.c 	vcpu->run->exit_reason = 0;
vcpu             4876 arch/x86/kvm/vmx/vmx.c 	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
vcpu             4881 arch/x86/kvm/vmx/vmx.c static int handle_dr(struct kvm_vcpu *vcpu)
vcpu             4890 arch/x86/kvm/vmx/vmx.c 	if (!kvm_require_dr(vcpu, dr))
vcpu             4894 arch/x86/kvm/vmx/vmx.c 	if (!kvm_require_cpl(vcpu, 0))
vcpu             4903 arch/x86/kvm/vmx/vmx.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu             4904 arch/x86/kvm/vmx/vmx.c 			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
vcpu             4905 arch/x86/kvm/vmx/vmx.c 			vcpu->run->debug.arch.dr7 = dr7;
vcpu             4906 arch/x86/kvm/vmx/vmx.c 			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu             4907 arch/x86/kvm/vmx/vmx.c 			vcpu->run->debug.arch.exception = DB_VECTOR;
vcpu             4908 arch/x86/kvm/vmx/vmx.c 			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
vcpu             4911 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
vcpu             4912 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
vcpu             4913 arch/x86/kvm/vmx/vmx.c 			kvm_queue_exception(vcpu, DB_VECTOR);
vcpu             4918 arch/x86/kvm/vmx/vmx.c 	if (vcpu->guest_debug == 0) {
vcpu             4919 arch/x86/kvm/vmx/vmx.c 		exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
vcpu             4926 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
vcpu             4934 arch/x86/kvm/vmx/vmx.c 		if (kvm_get_dr(vcpu, dr, &val))
vcpu             4936 arch/x86/kvm/vmx/vmx.c 		kvm_register_write(vcpu, reg, val);
vcpu             4938 arch/x86/kvm/vmx/vmx.c 		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
vcpu             4941 arch/x86/kvm/vmx/vmx.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             4944 arch/x86/kvm/vmx/vmx.c static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
vcpu             4946 arch/x86/kvm/vmx/vmx.c 	return vcpu->arch.dr6;
vcpu             4949 arch/x86/kvm/vmx/vmx.c static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
vcpu             4953 arch/x86/kvm/vmx/vmx.c static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
vcpu             4955 arch/x86/kvm/vmx/vmx.c 	get_debugreg(vcpu->arch.db[0], 0);
vcpu             4956 arch/x86/kvm/vmx/vmx.c 	get_debugreg(vcpu->arch.db[1], 1);
vcpu             4957 arch/x86/kvm/vmx/vmx.c 	get_debugreg(vcpu->arch.db[2], 2);
vcpu             4958 arch/x86/kvm/vmx/vmx.c 	get_debugreg(vcpu->arch.db[3], 3);
vcpu             4959 arch/x86/kvm/vmx/vmx.c 	get_debugreg(vcpu->arch.dr6, 6);
vcpu             4960 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
vcpu             4962 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
vcpu             4963 arch/x86/kvm/vmx/vmx.c 	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING);
vcpu             4966 arch/x86/kvm/vmx/vmx.c static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
vcpu             4971 arch/x86/kvm/vmx/vmx.c static int handle_cpuid(struct kvm_vcpu *vcpu)
vcpu             4973 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_cpuid(vcpu);
vcpu             4976 arch/x86/kvm/vmx/vmx.c static int handle_rdmsr(struct kvm_vcpu *vcpu)
vcpu             4978 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_rdmsr(vcpu);
vcpu             4981 arch/x86/kvm/vmx/vmx.c static int handle_wrmsr(struct kvm_vcpu *vcpu)
vcpu             4983 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_wrmsr(vcpu);
vcpu             4986 arch/x86/kvm/vmx/vmx.c static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
vcpu             4988 arch/x86/kvm/vmx/vmx.c 	kvm_apic_update_ppr(vcpu);
vcpu             4992 arch/x86/kvm/vmx/vmx.c static int handle_interrupt_window(struct kvm_vcpu *vcpu)
vcpu             4994 arch/x86/kvm/vmx/vmx.c 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING);
vcpu             4996 arch/x86/kvm/vmx/vmx.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             4998 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.irq_window_exits;
vcpu             5002 arch/x86/kvm/vmx/vmx.c static int handle_halt(struct kvm_vcpu *vcpu)
vcpu             5004 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_halt(vcpu);
vcpu             5007 arch/x86/kvm/vmx/vmx.c static int handle_vmcall(struct kvm_vcpu *vcpu)
vcpu             5009 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_hypercall(vcpu);
vcpu             5012 arch/x86/kvm/vmx/vmx.c static int handle_invd(struct kvm_vcpu *vcpu)
vcpu             5014 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_instruction(vcpu, 0);
vcpu             5017 arch/x86/kvm/vmx/vmx.c static int handle_invlpg(struct kvm_vcpu *vcpu)
vcpu             5021 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_invlpg(vcpu, exit_qualification);
vcpu             5022 arch/x86/kvm/vmx/vmx.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             5025 arch/x86/kvm/vmx/vmx.c static int handle_rdpmc(struct kvm_vcpu *vcpu)
vcpu             5029 arch/x86/kvm/vmx/vmx.c 	err = kvm_rdpmc(vcpu);
vcpu             5030 arch/x86/kvm/vmx/vmx.c 	return kvm_complete_insn_gp(vcpu, err);
vcpu             5033 arch/x86/kvm/vmx/vmx.c static int handle_wbinvd(struct kvm_vcpu *vcpu)
vcpu             5035 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_wbinvd(vcpu);
vcpu             5038 arch/x86/kvm/vmx/vmx.c static int handle_xsetbv(struct kvm_vcpu *vcpu)
vcpu             5040 arch/x86/kvm/vmx/vmx.c 	u64 new_bv = kvm_read_edx_eax(vcpu);
vcpu             5041 arch/x86/kvm/vmx/vmx.c 	u32 index = kvm_rcx_read(vcpu);
vcpu             5043 arch/x86/kvm/vmx/vmx.c 	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
vcpu             5044 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5048 arch/x86/kvm/vmx/vmx.c static int handle_apic_access(struct kvm_vcpu *vcpu)
vcpu             5063 arch/x86/kvm/vmx/vmx.c 			kvm_lapic_set_eoi(vcpu);
vcpu             5064 arch/x86/kvm/vmx/vmx.c 			return kvm_skip_emulated_instruction(vcpu);
vcpu             5067 arch/x86/kvm/vmx/vmx.c 	return kvm_emulate_instruction(vcpu, 0);
vcpu             5070 arch/x86/kvm/vmx/vmx.c static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
vcpu             5076 arch/x86/kvm/vmx/vmx.c 	kvm_apic_set_eoi_accelerated(vcpu, vector);
vcpu             5080 arch/x86/kvm/vmx/vmx.c static int handle_apic_write(struct kvm_vcpu *vcpu)
vcpu             5086 arch/x86/kvm/vmx/vmx.c 	kvm_apic_write_nodecode(vcpu, offset);
vcpu             5090 arch/x86/kvm/vmx/vmx.c static int handle_task_switch(struct kvm_vcpu *vcpu)
vcpu             5092 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5109 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.nmi_injected = false;
vcpu             5110 arch/x86/kvm/vmx/vmx.c 			vmx_set_nmi_mask(vcpu, true);
vcpu             5114 arch/x86/kvm/vmx/vmx.c 			kvm_clear_interrupt_queue(vcpu);
vcpu             5125 arch/x86/kvm/vmx/vmx.c 			kvm_clear_exception_queue(vcpu);
vcpu             5136 arch/x86/kvm/vmx/vmx.c 		WARN_ON(!skip_emulated_instruction(vcpu));
vcpu             5142 arch/x86/kvm/vmx/vmx.c 	return kvm_task_switch(vcpu, tss_selector,
vcpu             5147 arch/x86/kvm/vmx/vmx.c static int handle_ept_violation(struct kvm_vcpu *vcpu)
vcpu             5161 arch/x86/kvm/vmx/vmx.c 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
vcpu             5187 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.exit_qualification = exit_qualification;
vcpu             5188 arch/x86/kvm/vmx/vmx.c 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
vcpu             5191 arch/x86/kvm/vmx/vmx.c static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
vcpu             5200 arch/x86/kvm/vmx/vmx.c 	if (!is_guest_mode(vcpu) &&
vcpu             5201 arch/x86/kvm/vmx/vmx.c 	    !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
vcpu             5203 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5206 arch/x86/kvm/vmx/vmx.c 	return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
vcpu             5209 arch/x86/kvm/vmx/vmx.c static int handle_nmi_window(struct kvm_vcpu *vcpu)
vcpu             5212 arch/x86/kvm/vmx/vmx.c 	exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING);
vcpu             5213 arch/x86/kvm/vmx/vmx.c 	++vcpu->stat.nmi_window_exits;
vcpu             5214 arch/x86/kvm/vmx/vmx.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             5219 arch/x86/kvm/vmx/vmx.c static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
vcpu             5221 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5236 arch/x86/kvm/vmx/vmx.c 		if (intr_window_requested && vmx_interrupt_allowed(vcpu))
vcpu             5237 arch/x86/kvm/vmx/vmx.c 			return handle_interrupt_window(&vmx->vcpu);
vcpu             5239 arch/x86/kvm/vmx/vmx.c 		if (kvm_test_request(KVM_REQ_EVENT, vcpu))
vcpu             5242 arch/x86/kvm/vmx/vmx.c 		if (!kvm_emulate_instruction(vcpu, 0))
vcpu             5246 arch/x86/kvm/vmx/vmx.c 		    vcpu->arch.exception.pending) {
vcpu             5247 arch/x86/kvm/vmx/vmx.c 			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             5248 arch/x86/kvm/vmx/vmx.c 			vcpu->run->internal.suberror =
vcpu             5250 arch/x86/kvm/vmx/vmx.c 			vcpu->run->internal.ndata = 0;
vcpu             5254 arch/x86/kvm/vmx/vmx.c 		if (vcpu->arch.halt_request) {
vcpu             5255 arch/x86/kvm/vmx/vmx.c 			vcpu->arch.halt_request = 0;
vcpu             5256 arch/x86/kvm/vmx/vmx.c 			return kvm_vcpu_halt(vcpu);
vcpu             5273 arch/x86/kvm/vmx/vmx.c static void grow_ple_window(struct kvm_vcpu *vcpu)
vcpu             5275 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5284 arch/x86/kvm/vmx/vmx.c 		trace_kvm_ple_window_update(vcpu->vcpu_id,
vcpu             5289 arch/x86/kvm/vmx/vmx.c static void shrink_ple_window(struct kvm_vcpu *vcpu)
vcpu             5291 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5300 arch/x86/kvm/vmx/vmx.c 		trace_kvm_ple_window_update(vcpu->vcpu_id,
vcpu             5310 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu;
vcpu             5314 arch/x86/kvm/vmx/vmx.c 	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
vcpu             5316 arch/x86/kvm/vmx/vmx.c 		struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             5319 arch/x86/kvm/vmx/vmx.c 			kvm_vcpu_kick(vcpu);
vcpu             5341 arch/x86/kvm/vmx/vmx.c static int handle_pause(struct kvm_vcpu *vcpu)
vcpu             5343 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vcpu->kvm))
vcpu             5344 arch/x86/kvm/vmx/vmx.c 		grow_ple_window(vcpu);
vcpu             5352 arch/x86/kvm/vmx/vmx.c 	kvm_vcpu_on_spin(vcpu, true);
vcpu             5353 arch/x86/kvm/vmx/vmx.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             5356 arch/x86/kvm/vmx/vmx.c static int handle_nop(struct kvm_vcpu *vcpu)
vcpu             5358 arch/x86/kvm/vmx/vmx.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             5361 arch/x86/kvm/vmx/vmx.c static int handle_mwait(struct kvm_vcpu *vcpu)
vcpu             5364 arch/x86/kvm/vmx/vmx.c 	return handle_nop(vcpu);
vcpu             5367 arch/x86/kvm/vmx/vmx.c static int handle_invalid_op(struct kvm_vcpu *vcpu)
vcpu             5369 arch/x86/kvm/vmx/vmx.c 	kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             5373 arch/x86/kvm/vmx/vmx.c static int handle_monitor_trap(struct kvm_vcpu *vcpu)
vcpu             5378 arch/x86/kvm/vmx/vmx.c static int handle_monitor(struct kvm_vcpu *vcpu)
vcpu             5381 arch/x86/kvm/vmx/vmx.c 	return handle_nop(vcpu);
vcpu             5384 arch/x86/kvm/vmx/vmx.c static int handle_invpcid(struct kvm_vcpu *vcpu)
vcpu             5398 arch/x86/kvm/vmx/vmx.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
vcpu             5399 arch/x86/kvm/vmx/vmx.c 		kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             5404 arch/x86/kvm/vmx/vmx.c 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
vcpu             5407 arch/x86/kvm/vmx/vmx.c 		kvm_inject_gp(vcpu, 0);
vcpu             5414 arch/x86/kvm/vmx/vmx.c 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vcpu             5419 arch/x86/kvm/vmx/vmx.c 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
vcpu             5420 arch/x86/kvm/vmx/vmx.c 		kvm_inject_page_fault(vcpu, &e);
vcpu             5425 arch/x86/kvm/vmx/vmx.c 		kvm_inject_gp(vcpu, 0);
vcpu             5429 arch/x86/kvm/vmx/vmx.c 	pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
vcpu             5434 arch/x86/kvm/vmx/vmx.c 		    is_noncanonical_address(operand.gla, vcpu)) {
vcpu             5435 arch/x86/kvm/vmx/vmx.c 			kvm_inject_gp(vcpu, 0);
vcpu             5438 arch/x86/kvm/vmx/vmx.c 		kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
vcpu             5439 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5443 arch/x86/kvm/vmx/vmx.c 			kvm_inject_gp(vcpu, 0);
vcpu             5447 arch/x86/kvm/vmx/vmx.c 		if (kvm_get_active_pcid(vcpu) == operand.pcid) {
vcpu             5448 arch/x86/kvm/vmx/vmx.c 			kvm_mmu_sync_roots(vcpu);
vcpu             5449 arch/x86/kvm/vmx/vmx.c 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             5453 arch/x86/kvm/vmx/vmx.c 			if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
vcpu             5457 arch/x86/kvm/vmx/vmx.c 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
vcpu             5464 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5476 arch/x86/kvm/vmx/vmx.c 		kvm_mmu_unload(vcpu);
vcpu             5477 arch/x86/kvm/vmx/vmx.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu             5484 arch/x86/kvm/vmx/vmx.c static int handle_pml_full(struct kvm_vcpu *vcpu)
vcpu             5488 arch/x86/kvm/vmx/vmx.c 	trace_kvm_pml_full(vcpu->vcpu_id);
vcpu             5496 arch/x86/kvm/vmx/vmx.c 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
vcpu             5509 arch/x86/kvm/vmx/vmx.c static int handle_preemption_timer(struct kvm_vcpu *vcpu)
vcpu             5511 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5515 arch/x86/kvm/vmx/vmx.c 		kvm_lapic_expired_hv_timer(vcpu);
vcpu             5524 arch/x86/kvm/vmx/vmx.c static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
vcpu             5526 arch/x86/kvm/vmx/vmx.c 	kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             5530 arch/x86/kvm/vmx/vmx.c static int handle_encls(struct kvm_vcpu *vcpu)
vcpu             5537 arch/x86/kvm/vmx/vmx.c 	kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             5546 arch/x86/kvm/vmx/vmx.c static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
vcpu             5602 arch/x86/kvm/vmx/vmx.c static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
vcpu             5616 arch/x86/kvm/vmx/vmx.c static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
vcpu             5618 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5640 arch/x86/kvm/vmx/vmx.c 		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
vcpu             5654 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu;
vcpu             5661 arch/x86/kvm/vmx/vmx.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             5662 arch/x86/kvm/vmx/vmx.c 		kvm_vcpu_kick(vcpu);
vcpu             5844 arch/x86/kvm/vmx/vmx.c static int vmx_handle_exit(struct kvm_vcpu *vcpu)
vcpu             5846 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             5850 arch/x86/kvm/vmx/vmx.c 	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
vcpu             5860 arch/x86/kvm/vmx/vmx.c 		vmx_flush_pml_buffer(vcpu);
vcpu             5864 arch/x86/kvm/vmx/vmx.c 		return handle_invalid_guest_state(vcpu);
vcpu             5866 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
vcpu             5867 arch/x86/kvm/vmx/vmx.c 		return nested_vmx_reflect_vmexit(vcpu, exit_reason);
vcpu             5871 arch/x86/kvm/vmx/vmx.c 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu             5872 arch/x86/kvm/vmx/vmx.c 		vcpu->run->fail_entry.hardware_entry_failure_reason
vcpu             5879 arch/x86/kvm/vmx/vmx.c 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu             5880 arch/x86/kvm/vmx/vmx.c 		vcpu->run->fail_entry.hardware_entry_failure_reason
vcpu             5897 arch/x86/kvm/vmx/vmx.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             5898 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu             5899 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.ndata = 3;
vcpu             5900 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[0] = vectoring_info;
vcpu             5901 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[1] = exit_reason;
vcpu             5902 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
vcpu             5904 arch/x86/kvm/vmx/vmx.c 			vcpu->run->internal.ndata++;
vcpu             5905 arch/x86/kvm/vmx/vmx.c 			vcpu->run->internal.data[3] =
vcpu             5913 arch/x86/kvm/vmx/vmx.c 		if (vmx_interrupt_allowed(vcpu)) {
vcpu             5916 arch/x86/kvm/vmx/vmx.c 			   vcpu->arch.nmi_pending) {
vcpu             5925 arch/x86/kvm/vmx/vmx.c 			       __func__, vcpu->vcpu_id);
vcpu             5932 arch/x86/kvm/vmx/vmx.c 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
vcpu             5934 arch/x86/kvm/vmx/vmx.c 		vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
vcpu             5937 arch/x86/kvm/vmx/vmx.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             5938 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.suberror =
vcpu             5940 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.ndata = 1;
vcpu             5941 arch/x86/kvm/vmx/vmx.c 		vcpu->run->internal.data[0] = exit_reason;
vcpu             5956 arch/x86/kvm/vmx/vmx.c static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
vcpu             5972 arch/x86/kvm/vmx/vmx.c 		flush_l1d = vcpu->arch.l1tf_flush_l1d;
vcpu             5973 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.l1tf_flush_l1d = false;
vcpu             5986 arch/x86/kvm/vmx/vmx.c 	vcpu->stat.l1d_flush++;
vcpu             6016 arch/x86/kvm/vmx/vmx.c static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
vcpu             6018 arch/x86/kvm/vmx/vmx.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             6020 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu) &&
vcpu             6032 arch/x86/kvm/vmx/vmx.c void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
vcpu             6034 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6037 arch/x86/kvm/vmx/vmx.c 	if (!lapic_in_kernel(vcpu))
vcpu             6045 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu)) {
vcpu             6054 arch/x86/kvm/vmx/vmx.c 	switch (kvm_get_apic_mode(vcpu)) {
vcpu             6063 arch/x86/kvm/vmx/vmx.c 			vmx_flush_tlb(vcpu, true);
vcpu             6074 arch/x86/kvm/vmx/vmx.c 	vmx_update_msr_bitmap(vcpu);
vcpu             6077 arch/x86/kvm/vmx/vmx.c static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
vcpu             6079 arch/x86/kvm/vmx/vmx.c 	if (!is_guest_mode(vcpu)) {
vcpu             6081 arch/x86/kvm/vmx/vmx.c 		vmx_flush_tlb(vcpu, true);
vcpu             6085 arch/x86/kvm/vmx/vmx.c static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
vcpu             6119 arch/x86/kvm/vmx/vmx.c static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
vcpu             6129 arch/x86/kvm/vmx/vmx.c 	if (!is_guest_mode(vcpu))
vcpu             6133 arch/x86/kvm/vmx/vmx.c static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
vcpu             6135 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6139 arch/x86/kvm/vmx/vmx.c 	WARN_ON(!vcpu->arch.apicv_active);
vcpu             6148 arch/x86/kvm/vmx/vmx.c 			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
vcpu             6158 arch/x86/kvm/vmx/vmx.c 		if (is_guest_mode(vcpu) && max_irr_updated) {
vcpu             6159 arch/x86/kvm/vmx/vmx.c 			if (nested_exit_on_intr(vcpu))
vcpu             6160 arch/x86/kvm/vmx/vmx.c 				kvm_vcpu_exiting_guest_mode(vcpu);
vcpu             6162 arch/x86/kvm/vmx/vmx.c 				kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             6165 arch/x86/kvm/vmx/vmx.c 		max_irr = kvm_lapic_find_highest_irr(vcpu);
vcpu             6167 arch/x86/kvm/vmx/vmx.c 	vmx_hwapic_irr_update(vcpu, max_irr);
vcpu             6171 arch/x86/kvm/vmx/vmx.c static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
vcpu             6173 arch/x86/kvm/vmx/vmx.c 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             6179 arch/x86/kvm/vmx/vmx.c static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
vcpu             6181 arch/x86/kvm/vmx/vmx.c 	if (!kvm_vcpu_apicv_active(vcpu))
vcpu             6190 arch/x86/kvm/vmx/vmx.c static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
vcpu             6192 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6204 arch/x86/kvm/vmx/vmx.c 		vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
vcpu             6212 arch/x86/kvm/vmx/vmx.c 		kvm_before_interrupt(&vmx->vcpu);
vcpu             6214 arch/x86/kvm/vmx/vmx.c 		kvm_after_interrupt(&vmx->vcpu);
vcpu             6218 arch/x86/kvm/vmx/vmx.c static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
vcpu             6237 arch/x86/kvm/vmx/vmx.c 	kvm_before_interrupt(vcpu);
vcpu             6260 arch/x86/kvm/vmx/vmx.c 	kvm_after_interrupt(vcpu);
vcpu             6264 arch/x86/kvm/vmx/vmx.c static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
vcpu             6266 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6269 arch/x86/kvm/vmx/vmx.c 		handle_external_interrupt_irqoff(vcpu);
vcpu             6341 arch/x86/kvm/vmx/vmx.c static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
vcpu             6352 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.nmi_injected = false;
vcpu             6353 arch/x86/kvm/vmx/vmx.c 	kvm_clear_exception_queue(vcpu);
vcpu             6354 arch/x86/kvm/vmx/vmx.c 	kvm_clear_interrupt_queue(vcpu);
vcpu             6359 arch/x86/kvm/vmx/vmx.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             6366 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.nmi_injected = true;
vcpu             6372 arch/x86/kvm/vmx/vmx.c 		vmx_set_nmi_mask(vcpu, false);
vcpu             6375 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
vcpu             6380 arch/x86/kvm/vmx/vmx.c 			kvm_requeue_exception_e(vcpu, vector, err);
vcpu             6382 arch/x86/kvm/vmx/vmx.c 			kvm_requeue_exception(vcpu, vector);
vcpu             6385 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
vcpu             6388 arch/x86/kvm/vmx/vmx.c 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
vcpu             6397 arch/x86/kvm/vmx/vmx.c 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
vcpu             6402 arch/x86/kvm/vmx/vmx.c static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
vcpu             6404 arch/x86/kvm/vmx/vmx.c 	__vmx_complete_interrupts(vcpu,
vcpu             6447 arch/x86/kvm/vmx/vmx.c static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
vcpu             6449 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6483 arch/x86/kvm/vmx/vmx.c static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vcpu             6485 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6504 arch/x86/kvm/vmx/vmx.c 		nested_sync_vmcs12_to_shadow(vcpu);
vcpu             6506 arch/x86/kvm/vmx/vmx.c 	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
vcpu             6507 arch/x86/kvm/vmx/vmx.c 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
vcpu             6508 arch/x86/kvm/vmx/vmx.c 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vcpu             6509 arch/x86/kvm/vmx/vmx.c 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
vcpu             6528 arch/x86/kvm/vmx/vmx.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu             6529 arch/x86/kvm/vmx/vmx.c 		vmx_set_interrupt_shadow(vcpu, 0);
vcpu             6531 arch/x86/kvm/vmx/vmx.c 	kvm_load_guest_xcr0(vcpu);
vcpu             6539 arch/x86/kvm/vmx/vmx.c 		vmx_update_hv_timer(vcpu);
vcpu             6541 arch/x86/kvm/vmx/vmx.c 	if (lapic_in_kernel(vcpu) &&
vcpu             6542 arch/x86/kvm/vmx/vmx.c 		vcpu->arch.apic->lapic_timer.timer_advance_ns)
vcpu             6543 arch/x86/kvm/vmx/vmx.c 		kvm_wait_lapic_expire(vcpu);
vcpu             6555 arch/x86/kvm/vmx/vmx.c 		vmx_l1d_flush(vcpu);
vcpu             6559 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.cr2 != read_cr2())
vcpu             6560 arch/x86/kvm/vmx/vmx.c 		write_cr2(vcpu->arch.cr2);
vcpu             6562 arch/x86/kvm/vmx/vmx.c 	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
vcpu             6565 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.cr2 = read_cr2();
vcpu             6582 arch/x86/kvm/vmx/vmx.c 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
vcpu             6593 arch/x86/kvm/vmx/vmx.c 		current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
vcpu             6612 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
vcpu             6617 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.regs_dirty = 0;
vcpu             6621 arch/x86/kvm/vmx/vmx.c 	kvm_put_guest_xcr0(vcpu);
vcpu             6658 arch/x86/kvm/vmx/vmx.c static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
vcpu             6660 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6665 arch/x86/kvm/vmx/vmx.c 	nested_vmx_free_vcpu(vcpu);
vcpu             6668 arch/x86/kvm/vmx/vmx.c 	kvm_vcpu_uninit(vcpu);
vcpu             6669 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
vcpu             6670 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
vcpu             6681 arch/x86/kvm/vmx/vmx.c 	BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
vcpu             6688 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
vcpu             6690 arch/x86/kvm/vmx/vmx.c 	if (!vmx->vcpu.arch.user_fpu) {
vcpu             6696 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
vcpu             6698 arch/x86/kvm/vmx/vmx.c 	if (!vmx->vcpu.arch.guest_fpu) {
vcpu             6706 arch/x86/kvm/vmx/vmx.c 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
vcpu             6753 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_load(&vmx->vcpu, cpu);
vcpu             6754 arch/x86/kvm/vmx/vmx.c 	vmx->vcpu.cpu = cpu;
vcpu             6756 arch/x86/kvm/vmx/vmx.c 	vmx_vcpu_put(&vmx->vcpu);
vcpu             6758 arch/x86/kvm/vmx/vmx.c 	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
vcpu             6790 arch/x86/kvm/vmx/vmx.c 	return &vmx->vcpu;
vcpu             6799 arch/x86/kvm/vmx/vmx.c 	kvm_vcpu_uninit(&vmx->vcpu);
vcpu             6802 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
vcpu             6804 arch/x86/kvm/vmx/vmx.c 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
vcpu             6863 arch/x86/kvm/vmx/vmx.c static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
vcpu             6884 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
vcpu             6890 arch/x86/kvm/vmx/vmx.c 	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
vcpu             6892 arch/x86/kvm/vmx/vmx.c 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
vcpu             6899 arch/x86/kvm/vmx/vmx.c 	cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
vcpu             6938 arch/x86/kvm/vmx/vmx.c static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
vcpu             6940 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6951 arch/x86/kvm/vmx/vmx.c 	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
vcpu             6967 arch/x86/kvm/vmx/vmx.c 	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
vcpu             6977 arch/x86/kvm/vmx/vmx.c static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vcpu             6979 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             6982 arch/x86/kvm/vmx/vmx.c 		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
vcpu             6994 arch/x86/kvm/vmx/vmx.c static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
vcpu             6996 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             7001 arch/x86/kvm/vmx/vmx.c 		best = kvm_find_cpuid_entry(vcpu, 0x14, i);
vcpu             7063 arch/x86/kvm/vmx/vmx.c static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
vcpu             7065 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             7072 arch/x86/kvm/vmx/vmx.c 	if (nested_vmx_allowed(vcpu))
vcpu             7073 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
vcpu             7076 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
vcpu             7079 arch/x86/kvm/vmx/vmx.c 	if (nested_vmx_allowed(vcpu)) {
vcpu             7080 arch/x86/kvm/vmx/vmx.c 		nested_vmx_cr_fixed1_bits_update(vcpu);
vcpu             7081 arch/x86/kvm/vmx/vmx.c 		nested_vmx_entry_exit_ctls_update(vcpu);
vcpu             7085 arch/x86/kvm/vmx/vmx.c 			guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
vcpu             7086 arch/x86/kvm/vmx/vmx.c 		update_intel_pt_cfg(vcpu);
vcpu             7095 arch/x86/kvm/vmx/vmx.c static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
vcpu             7097 arch/x86/kvm/vmx/vmx.c 	to_vmx(vcpu)->req_immediate_exit = true;
vcpu             7100 arch/x86/kvm/vmx/vmx.c static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
vcpu             7103 arch/x86/kvm/vmx/vmx.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             7128 arch/x86/kvm/vmx/vmx.c 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
vcpu             7134 arch/x86/kvm/vmx/vmx.c static int vmx_check_intercept(struct kvm_vcpu *vcpu,
vcpu             7138 arch/x86/kvm/vmx/vmx.c 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vcpu             7139 arch/x86/kvm/vmx/vmx.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             7158 arch/x86/kvm/vmx/vmx.c 		return vmx_check_intercept_io(vcpu, info);
vcpu             7201 arch/x86/kvm/vmx/vmx.c static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
vcpu             7206 arch/x86/kvm/vmx/vmx.c 	struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
vcpu             7208 arch/x86/kvm/vmx/vmx.c 	if (kvm_mwait_in_guest(vcpu->kvm) ||
vcpu             7209 arch/x86/kvm/vmx/vmx.c 		kvm_can_post_timer_interrupt(vcpu))
vcpu             7212 arch/x86/kvm/vmx/vmx.c 	vmx = to_vmx(vcpu);
vcpu             7214 arch/x86/kvm/vmx/vmx.c 	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
vcpu             7216 arch/x86/kvm/vmx/vmx.c 	lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
vcpu             7225 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
vcpu             7228 arch/x86/kvm/vmx/vmx.c 				vcpu->arch.tsc_scaling_ratio, &delta_tsc))
vcpu             7245 arch/x86/kvm/vmx/vmx.c static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
vcpu             7247 arch/x86/kvm/vmx/vmx.c 	to_vmx(vcpu)->hv_deadline_tsc = -1;
vcpu             7251 arch/x86/kvm/vmx/vmx.c static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
vcpu             7253 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vcpu->kvm))
vcpu             7254 arch/x86/kvm/vmx/vmx.c 		shrink_ple_window(vcpu);
vcpu             7275 arch/x86/kvm/vmx/vmx.c static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
vcpu             7278 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             7281 arch/x86/kvm/vmx/vmx.c 	if (is_guest_mode(vcpu)) {
vcpu             7289 arch/x86/kvm/vmx/vmx.c 		vmcs12 = get_vmcs12(vcpu);
vcpu             7301 arch/x86/kvm/vmx/vmx.c 		if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
vcpu             7318 arch/x86/kvm/vmx/vmx.c static void __pi_post_block(struct kvm_vcpu *vcpu)
vcpu             7320 arch/x86/kvm/vmx/vmx.c 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             7329 arch/x86/kvm/vmx/vmx.c 		dest = cpu_physical_id(vcpu->cpu);
vcpu             7341 arch/x86/kvm/vmx/vmx.c 	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
vcpu             7342 arch/x86/kvm/vmx/vmx.c 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
vcpu             7343 arch/x86/kvm/vmx/vmx.c 		list_del(&vcpu->blocked_vcpu_list);
vcpu             7344 arch/x86/kvm/vmx/vmx.c 		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
vcpu             7345 arch/x86/kvm/vmx/vmx.c 		vcpu->pre_pcpu = -1;
vcpu             7362 arch/x86/kvm/vmx/vmx.c static int pi_pre_block(struct kvm_vcpu *vcpu)
vcpu             7366 arch/x86/kvm/vmx/vmx.c 	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
vcpu             7368 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
vcpu             7370 arch/x86/kvm/vmx/vmx.c 		!kvm_vcpu_apicv_active(vcpu))
vcpu             7375 arch/x86/kvm/vmx/vmx.c 	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
vcpu             7376 arch/x86/kvm/vmx/vmx.c 		vcpu->pre_pcpu = vcpu->cpu;
vcpu             7377 arch/x86/kvm/vmx/vmx.c 		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
vcpu             7378 arch/x86/kvm/vmx/vmx.c 		list_add_tail(&vcpu->blocked_vcpu_list,
vcpu             7380 arch/x86/kvm/vmx/vmx.c 				       vcpu->pre_pcpu));
vcpu             7381 arch/x86/kvm/vmx/vmx.c 		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
vcpu             7399 arch/x86/kvm/vmx/vmx.c 		dest = cpu_physical_id(vcpu->pre_pcpu);
vcpu             7413 arch/x86/kvm/vmx/vmx.c 		__pi_post_block(vcpu);
vcpu             7416 arch/x86/kvm/vmx/vmx.c 	return (vcpu->pre_pcpu == -1);
vcpu             7419 arch/x86/kvm/vmx/vmx.c static int vmx_pre_block(struct kvm_vcpu *vcpu)
vcpu             7421 arch/x86/kvm/vmx/vmx.c 	if (pi_pre_block(vcpu))
vcpu             7424 arch/x86/kvm/vmx/vmx.c 	if (kvm_lapic_hv_timer_in_use(vcpu))
vcpu             7425 arch/x86/kvm/vmx/vmx.c 		kvm_lapic_switch_to_sw_timer(vcpu);
vcpu             7430 arch/x86/kvm/vmx/vmx.c static void pi_post_block(struct kvm_vcpu *vcpu)
vcpu             7432 arch/x86/kvm/vmx/vmx.c 	if (vcpu->pre_pcpu == -1)
vcpu             7437 arch/x86/kvm/vmx/vmx.c 	__pi_post_block(vcpu);
vcpu             7441 arch/x86/kvm/vmx/vmx.c static void vmx_post_block(struct kvm_vcpu *vcpu)
vcpu             7444 arch/x86/kvm/vmx/vmx.c 		kvm_lapic_switch_to_hv_timer(vcpu);
vcpu             7446 arch/x86/kvm/vmx/vmx.c 	pi_post_block(vcpu);
vcpu             7464 arch/x86/kvm/vmx/vmx.c 	struct kvm_vcpu *vcpu;
vcpu             7502 arch/x86/kvm/vmx/vmx.c 		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
vcpu             7519 arch/x86/kvm/vmx/vmx.c 		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu             7522 arch/x86/kvm/vmx/vmx.c 		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu             7543 arch/x86/kvm/vmx/vmx.c static void vmx_setup_mce(struct kvm_vcpu *vcpu)
vcpu             7545 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
vcpu             7546 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
vcpu             7549 arch/x86/kvm/vmx/vmx.c 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
vcpu             7553 arch/x86/kvm/vmx/vmx.c static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
vcpu             7556 arch/x86/kvm/vmx/vmx.c 	if (to_vmx(vcpu)->nested.nested_run_pending)
vcpu             7561 arch/x86/kvm/vmx/vmx.c static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
vcpu             7563 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             7565 arch/x86/kvm/vmx/vmx.c 	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
vcpu             7567 arch/x86/kvm/vmx/vmx.c 		nested_vmx_vmexit(vcpu, -1, 0, 0);
vcpu             7571 arch/x86/kvm/vmx/vmx.c 	vmx_clear_hlt(vcpu);
vcpu             7575 arch/x86/kvm/vmx/vmx.c static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vcpu             7577 arch/x86/kvm/vmx/vmx.c 	struct vcpu_vmx *vmx = to_vmx(vcpu);
vcpu             7586 arch/x86/kvm/vmx/vmx.c 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
vcpu             7595 arch/x86/kvm/vmx/vmx.c static int enable_smi_window(struct kvm_vcpu *vcpu)
vcpu             7600 arch/x86/kvm/vmx/vmx.c static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
vcpu             7605 arch/x86/kvm/vmx/vmx.c static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
vcpu             7607 arch/x86/kvm/vmx/vmx.c 	return to_vmx(vcpu)->nested.vmxon;
vcpu              189 arch/x86/kvm/vmx/vmx.h 	struct kvm_vcpu       vcpu;
vcpu              306 arch/x86/kvm/vmx/vmx.h bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
vcpu              307 arch/x86/kvm/vmx/vmx.h void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
vcpu              309 arch/x86/kvm/vmx/vmx.h void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
vcpu              313 arch/x86/kvm/vmx/vmx.h void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
vcpu              316 arch/x86/kvm/vmx/vmx.h int vmx_get_cpl(struct kvm_vcpu *vcpu);
vcpu              317 arch/x86/kvm/vmx/vmx.h unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
vcpu              318 arch/x86/kvm/vmx/vmx.h void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
vcpu              319 arch/x86/kvm/vmx/vmx.h u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
vcpu              320 arch/x86/kvm/vmx/vmx.h void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
vcpu              321 arch/x86/kvm/vmx/vmx.h void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
vcpu              322 arch/x86/kvm/vmx/vmx.h void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
vcpu              323 arch/x86/kvm/vmx/vmx.h void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
vcpu              324 arch/x86/kvm/vmx/vmx.h int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
vcpu              326 arch/x86/kvm/vmx/vmx.h void ept_save_pdptrs(struct kvm_vcpu *vcpu);
vcpu              327 arch/x86/kvm/vmx/vmx.h void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
vcpu              328 arch/x86/kvm/vmx/vmx.h void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
vcpu              329 arch/x86/kvm/vmx/vmx.h u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
vcpu              330 arch/x86/kvm/vmx/vmx.h void update_exception_bitmap(struct kvm_vcpu *vcpu);
vcpu              331 arch/x86/kvm/vmx/vmx.h void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
vcpu              332 arch/x86/kvm/vmx/vmx.h bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
vcpu              333 arch/x86/kvm/vmx/vmx.h void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
vcpu              334 arch/x86/kvm/vmx/vmx.h void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
vcpu              466 arch/x86/kvm/vmx/vmx.h static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
vcpu              468 arch/x86/kvm/vmx/vmx.h 	return container_of(vcpu, struct vcpu_vmx, vcpu);
vcpu              471 arch/x86/kvm/vmx/vmx.h static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
vcpu              473 arch/x86/kvm/vmx/vmx.h 	return &(to_vmx(vcpu)->pi_desc);
vcpu              489 arch/x86/kvm/vmx/vmx.h u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
vcpu              491 arch/x86/kvm/vmx/vmx.h static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
vcpu              495 arch/x86/kvm/vmx/vmx.h 		if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
vcpu              497 arch/x86/kvm/vmx/vmx.h 		ept_sync_context(construct_eptp(vcpu,
vcpu              498 arch/x86/kvm/vmx/vmx.h 						vcpu->arch.mmu->root_hpa));
vcpu              504 arch/x86/kvm/vmx/vmx.h static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
vcpu              506 arch/x86/kvm/vmx/vmx.h 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
vcpu              511 arch/x86/kvm/vmx/vmx.h 	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
vcpu              103 arch/x86/kvm/x86.c static void update_cr8_intercept(struct kvm_vcpu *vcpu);
vcpu              104 arch/x86/kvm/x86.c static void process_nmi(struct kvm_vcpu *vcpu);
vcpu              105 arch/x86/kvm/x86.c static void enter_smm(struct kvm_vcpu *vcpu);
vcpu              106 arch/x86/kvm/x86.c static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
vcpu              107 arch/x86/kvm/x86.c static void store_regs(struct kvm_vcpu *vcpu);
vcpu              108 arch/x86/kvm/x86.c static int sync_regs(struct kvm_vcpu *vcpu);
vcpu              231 arch/x86/kvm/x86.c static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
vcpu              235 arch/x86/kvm/x86.c 		vcpu->arch.apf.gfns[i] = ~0;
vcpu              331 arch/x86/kvm/x86.c u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
vcpu              333 arch/x86/kvm/x86.c 	return vcpu->arch.apic_base;
vcpu              337 arch/x86/kvm/x86.c enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
vcpu              339 arch/x86/kvm/x86.c 	return kvm_apic_mode(kvm_get_apic_base(vcpu));
vcpu              343 arch/x86/kvm/x86.c int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu              345 arch/x86/kvm/x86.c 	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
vcpu              347 arch/x86/kvm/x86.c 	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
vcpu              348 arch/x86/kvm/x86.c 		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
vcpu              359 arch/x86/kvm/x86.c 	kvm_lapic_set_base(vcpu, msr_info->data);
vcpu              417 arch/x86/kvm/x86.c void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
vcpu              419 arch/x86/kvm/x86.c 	unsigned nr = vcpu->arch.exception.nr;
vcpu              420 arch/x86/kvm/x86.c 	bool has_payload = vcpu->arch.exception.has_payload;
vcpu              421 arch/x86/kvm/x86.c 	unsigned long payload = vcpu->arch.exception.payload;
vcpu              433 arch/x86/kvm/x86.c 		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
vcpu              437 arch/x86/kvm/x86.c 		vcpu->arch.dr6 |= DR6_RTM;
vcpu              438 arch/x86/kvm/x86.c 		vcpu->arch.dr6 |= payload;
vcpu              447 arch/x86/kvm/x86.c 		vcpu->arch.dr6 ^= payload & DR6_RTM;
vcpu              455 arch/x86/kvm/x86.c 		vcpu->arch.dr6 &= ~BIT(12);
vcpu              458 arch/x86/kvm/x86.c 		vcpu->arch.cr2 = payload;
vcpu              462 arch/x86/kvm/x86.c 	vcpu->arch.exception.has_payload = false;
vcpu              463 arch/x86/kvm/x86.c 	vcpu->arch.exception.payload = 0;
vcpu              467 arch/x86/kvm/x86.c static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu              474 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu              476 arch/x86/kvm/x86.c 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
vcpu              478 arch/x86/kvm/x86.c 		if (has_error && !is_protmode(vcpu))
vcpu              489 arch/x86/kvm/x86.c 			WARN_ON_ONCE(vcpu->arch.exception.pending);
vcpu              490 arch/x86/kvm/x86.c 			vcpu->arch.exception.injected = true;
vcpu              500 arch/x86/kvm/x86.c 			vcpu->arch.exception.pending = true;
vcpu              501 arch/x86/kvm/x86.c 			vcpu->arch.exception.injected = false;
vcpu              503 arch/x86/kvm/x86.c 		vcpu->arch.exception.has_error_code = has_error;
vcpu              504 arch/x86/kvm/x86.c 		vcpu->arch.exception.nr = nr;
vcpu              505 arch/x86/kvm/x86.c 		vcpu->arch.exception.error_code = error_code;
vcpu              506 arch/x86/kvm/x86.c 		vcpu->arch.exception.has_payload = has_payload;
vcpu              507 arch/x86/kvm/x86.c 		vcpu->arch.exception.payload = payload;
vcpu              519 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled ||
vcpu              520 arch/x86/kvm/x86.c 		    !is_guest_mode(vcpu))
vcpu              521 arch/x86/kvm/x86.c 			kvm_deliver_exception_payload(vcpu);
vcpu              526 arch/x86/kvm/x86.c 	prev_nr = vcpu->arch.exception.nr;
vcpu              529 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu              541 arch/x86/kvm/x86.c 		vcpu->arch.exception.pending = true;
vcpu              542 arch/x86/kvm/x86.c 		vcpu->arch.exception.injected = false;
vcpu              543 arch/x86/kvm/x86.c 		vcpu->arch.exception.has_error_code = true;
vcpu              544 arch/x86/kvm/x86.c 		vcpu->arch.exception.nr = DF_VECTOR;
vcpu              545 arch/x86/kvm/x86.c 		vcpu->arch.exception.error_code = 0;
vcpu              546 arch/x86/kvm/x86.c 		vcpu->arch.exception.has_payload = false;
vcpu              547 arch/x86/kvm/x86.c 		vcpu->arch.exception.payload = 0;
vcpu              555 arch/x86/kvm/x86.c void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
vcpu              557 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
vcpu              561 arch/x86/kvm/x86.c void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
vcpu              563 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
vcpu              567 arch/x86/kvm/x86.c static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
vcpu              570 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
vcpu              573 arch/x86/kvm/x86.c static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
vcpu              576 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, true, error_code,
vcpu              580 arch/x86/kvm/x86.c int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
vcpu              583 arch/x86/kvm/x86.c 		kvm_inject_gp(vcpu, 0);
vcpu              585 arch/x86/kvm/x86.c 		return kvm_skip_emulated_instruction(vcpu);
vcpu              591 arch/x86/kvm/x86.c void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
vcpu              593 arch/x86/kvm/x86.c 	++vcpu->stat.pf_guest;
vcpu              594 arch/x86/kvm/x86.c 	vcpu->arch.exception.nested_apf =
vcpu              595 arch/x86/kvm/x86.c 		is_guest_mode(vcpu) && fault->async_page_fault;
vcpu              596 arch/x86/kvm/x86.c 	if (vcpu->arch.exception.nested_apf) {
vcpu              597 arch/x86/kvm/x86.c 		vcpu->arch.apf.nested_apf_token = fault->address;
vcpu              598 arch/x86/kvm/x86.c 		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
vcpu              600 arch/x86/kvm/x86.c 		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
vcpu              606 arch/x86/kvm/x86.c static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
vcpu              608 arch/x86/kvm/x86.c 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu              609 arch/x86/kvm/x86.c 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
vcpu              611 arch/x86/kvm/x86.c 		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
vcpu              616 arch/x86/kvm/x86.c void kvm_inject_nmi(struct kvm_vcpu *vcpu)
vcpu              618 arch/x86/kvm/x86.c 	atomic_inc(&vcpu->arch.nmi_queued);
vcpu              619 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_NMI, vcpu);
vcpu              623 arch/x86/kvm/x86.c void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
vcpu              625 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
vcpu              629 arch/x86/kvm/x86.c void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
vcpu              631 arch/x86/kvm/x86.c 	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
vcpu              639 arch/x86/kvm/x86.c bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
vcpu              641 arch/x86/kvm/x86.c 	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
vcpu              643 arch/x86/kvm/x86.c 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
vcpu              648 arch/x86/kvm/x86.c bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
vcpu              650 arch/x86/kvm/x86.c 	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
vcpu              653 arch/x86/kvm/x86.c 	kvm_queue_exception(vcpu, UD_VECTOR);
vcpu              663 arch/x86/kvm/x86.c int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
vcpu              672 arch/x86/kvm/x86.c 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
vcpu              678 arch/x86/kvm/x86.c 	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
vcpu              682 arch/x86/kvm/x86.c static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu              685 arch/x86/kvm/x86.c 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
vcpu              689 arch/x86/kvm/x86.c static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
vcpu              691 arch/x86/kvm/x86.c 	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
vcpu              698 arch/x86/kvm/x86.c int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
vcpu              706 arch/x86/kvm/x86.c 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
vcpu              715 arch/x86/kvm/x86.c 		    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
vcpu              724 arch/x86/kvm/x86.c 		  (unsigned long *)&vcpu->arch.regs_avail);
vcpu              726 arch/x86/kvm/x86.c 		  (unsigned long *)&vcpu->arch.regs_dirty);
vcpu              733 arch/x86/kvm/x86.c bool pdptrs_changed(struct kvm_vcpu *vcpu)
vcpu              735 arch/x86/kvm/x86.c 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
vcpu              741 arch/x86/kvm/x86.c 	if (!is_pae_paging(vcpu))
vcpu              745 arch/x86/kvm/x86.c 		      (unsigned long *)&vcpu->arch.regs_avail))
vcpu              748 arch/x86/kvm/x86.c 	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
vcpu              749 arch/x86/kvm/x86.c 	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
vcpu              750 arch/x86/kvm/x86.c 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
vcpu              754 arch/x86/kvm/x86.c 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
vcpu              761 arch/x86/kvm/x86.c int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu              763 arch/x86/kvm/x86.c 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
vcpu              781 arch/x86/kvm/x86.c 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
vcpu              783 arch/x86/kvm/x86.c 		if ((vcpu->arch.efer & EFER_LME)) {
vcpu              786 arch/x86/kvm/x86.c 			if (!is_pae(vcpu))
vcpu              788 arch/x86/kvm/x86.c 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu              793 arch/x86/kvm/x86.c 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
vcpu              794 arch/x86/kvm/x86.c 						 kvm_read_cr3(vcpu)))
vcpu              798 arch/x86/kvm/x86.c 	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
vcpu              801 arch/x86/kvm/x86.c 	kvm_x86_ops->set_cr0(vcpu, cr0);
vcpu              804 arch/x86/kvm/x86.c 		kvm_clear_async_pf_completion_queue(vcpu);
vcpu              805 arch/x86/kvm/x86.c 		kvm_async_pf_hash_reset(vcpu);
vcpu              809 arch/x86/kvm/x86.c 		kvm_mmu_reset_context(vcpu);
vcpu              812 arch/x86/kvm/x86.c 	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
vcpu              813 arch/x86/kvm/x86.c 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
vcpu              814 arch/x86/kvm/x86.c 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
vcpu              820 arch/x86/kvm/x86.c void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
vcpu              822 arch/x86/kvm/x86.c 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
vcpu              826 arch/x86/kvm/x86.c void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
vcpu              828 arch/x86/kvm/x86.c 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
vcpu              829 arch/x86/kvm/x86.c 			!vcpu->guest_xcr0_loaded) {
vcpu              831 arch/x86/kvm/x86.c 		if (vcpu->arch.xcr0 != host_xcr0)
vcpu              832 arch/x86/kvm/x86.c 			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
vcpu              833 arch/x86/kvm/x86.c 		vcpu->guest_xcr0_loaded = 1;
vcpu              837 arch/x86/kvm/x86.c 	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
vcpu              838 arch/x86/kvm/x86.c 	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
vcpu              839 arch/x86/kvm/x86.c 	    vcpu->arch.pkru != vcpu->arch.host_pkru)
vcpu              840 arch/x86/kvm/x86.c 		__write_pkru(vcpu->arch.pkru);
vcpu              844 arch/x86/kvm/x86.c void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
vcpu              847 arch/x86/kvm/x86.c 	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
vcpu              848 arch/x86/kvm/x86.c 	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
vcpu              849 arch/x86/kvm/x86.c 		vcpu->arch.pkru = rdpkru();
vcpu              850 arch/x86/kvm/x86.c 		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
vcpu              851 arch/x86/kvm/x86.c 			__write_pkru(vcpu->arch.host_pkru);
vcpu              854 arch/x86/kvm/x86.c 	if (vcpu->guest_xcr0_loaded) {
vcpu              855 arch/x86/kvm/x86.c 		if (vcpu->arch.xcr0 != host_xcr0)
vcpu              857 arch/x86/kvm/x86.c 		vcpu->guest_xcr0_loaded = 0;
vcpu              862 arch/x86/kvm/x86.c static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
vcpu              865 arch/x86/kvm/x86.c 	u64 old_xcr0 = vcpu->arch.xcr0;
vcpu              881 arch/x86/kvm/x86.c 	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
vcpu              895 arch/x86/kvm/x86.c 	vcpu->arch.xcr0 = xcr0;
vcpu              898 arch/x86/kvm/x86.c 		kvm_update_cpuid(vcpu);
vcpu              902 arch/x86/kvm/x86.c int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
vcpu              904 arch/x86/kvm/x86.c 	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
vcpu              905 arch/x86/kvm/x86.c 	    __kvm_set_xcr(vcpu, index, xcr)) {
vcpu              906 arch/x86/kvm/x86.c 		kvm_inject_gp(vcpu, 0);
vcpu              942 arch/x86/kvm/x86.c static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu              947 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
vcpu              950 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
vcpu              953 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
vcpu              956 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
vcpu              959 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
vcpu              962 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
vcpu              965 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
vcpu              971 arch/x86/kvm/x86.c int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vcpu              973 arch/x86/kvm/x86.c 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
vcpu              977 arch/x86/kvm/x86.c 	if (kvm_valid_cr4(vcpu, cr4))
vcpu              980 arch/x86/kvm/x86.c 	if (is_long_mode(vcpu)) {
vcpu              983 arch/x86/kvm/x86.c 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
vcpu              985 arch/x86/kvm/x86.c 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
vcpu              986 arch/x86/kvm/x86.c 				   kvm_read_cr3(vcpu)))
vcpu              990 arch/x86/kvm/x86.c 		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
vcpu              994 arch/x86/kvm/x86.c 		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
vcpu              998 arch/x86/kvm/x86.c 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
vcpu             1003 arch/x86/kvm/x86.c 		kvm_mmu_reset_context(vcpu);
vcpu             1006 arch/x86/kvm/x86.c 		kvm_update_cpuid(vcpu);
vcpu             1012 arch/x86/kvm/x86.c int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu             1016 arch/x86/kvm/x86.c 	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
vcpu             1024 arch/x86/kvm/x86.c 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
vcpu             1026 arch/x86/kvm/x86.c 			kvm_mmu_sync_roots(vcpu);
vcpu             1027 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
vcpu             1032 arch/x86/kvm/x86.c 	if (is_long_mode(vcpu) &&
vcpu             1033 arch/x86/kvm/x86.c 	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
vcpu             1035 arch/x86/kvm/x86.c 	else if (is_pae_paging(vcpu) &&
vcpu             1036 arch/x86/kvm/x86.c 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
vcpu             1039 arch/x86/kvm/x86.c 	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
vcpu             1040 arch/x86/kvm/x86.c 	vcpu->arch.cr3 = cr3;
vcpu             1041 arch/x86/kvm/x86.c 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu             1047 arch/x86/kvm/x86.c int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
vcpu             1051 arch/x86/kvm/x86.c 	if (lapic_in_kernel(vcpu))
vcpu             1052 arch/x86/kvm/x86.c 		kvm_lapic_set_tpr(vcpu, cr8);
vcpu             1054 arch/x86/kvm/x86.c 		vcpu->arch.cr8 = cr8;
vcpu             1059 arch/x86/kvm/x86.c unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
vcpu             1061 arch/x86/kvm/x86.c 	if (lapic_in_kernel(vcpu))
vcpu             1062 arch/x86/kvm/x86.c 		return kvm_lapic_get_cr8(vcpu);
vcpu             1064 arch/x86/kvm/x86.c 		return vcpu->arch.cr8;
vcpu             1068 arch/x86/kvm/x86.c static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
vcpu             1072 arch/x86/kvm/x86.c 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
vcpu             1074 arch/x86/kvm/x86.c 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
vcpu             1075 arch/x86/kvm/x86.c 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
vcpu             1079 arch/x86/kvm/x86.c static void kvm_update_dr6(struct kvm_vcpu *vcpu)
vcpu             1081 arch/x86/kvm/x86.c 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu             1082 arch/x86/kvm/x86.c 		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
vcpu             1085 arch/x86/kvm/x86.c static void kvm_update_dr7(struct kvm_vcpu *vcpu)
vcpu             1089 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
vcpu             1090 arch/x86/kvm/x86.c 		dr7 = vcpu->arch.guest_debug_dr7;
vcpu             1092 arch/x86/kvm/x86.c 		dr7 = vcpu->arch.dr7;
vcpu             1093 arch/x86/kvm/x86.c 	kvm_x86_ops->set_dr7(vcpu, dr7);
vcpu             1094 arch/x86/kvm/x86.c 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
vcpu             1096 arch/x86/kvm/x86.c 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
vcpu             1099 arch/x86/kvm/x86.c static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
vcpu             1103 arch/x86/kvm/x86.c 	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
vcpu             1108 arch/x86/kvm/x86.c static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu             1110 arch/x86/kvm/x86.c 	size_t size = ARRAY_SIZE(vcpu->arch.db);
vcpu             1114 arch/x86/kvm/x86.c 		vcpu->arch.db[array_index_nospec(dr, size)] = val;
vcpu             1115 arch/x86/kvm/x86.c 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu             1116 arch/x86/kvm/x86.c 			vcpu->arch.eff_db[dr] = val;
vcpu             1123 arch/x86/kvm/x86.c 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
vcpu             1124 arch/x86/kvm/x86.c 		kvm_update_dr6(vcpu);
vcpu             1131 arch/x86/kvm/x86.c 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
vcpu             1132 arch/x86/kvm/x86.c 		kvm_update_dr7(vcpu);
vcpu             1139 arch/x86/kvm/x86.c int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
vcpu             1141 arch/x86/kvm/x86.c 	if (__kvm_set_dr(vcpu, dr, val)) {
vcpu             1142 arch/x86/kvm/x86.c 		kvm_inject_gp(vcpu, 0);
vcpu             1149 arch/x86/kvm/x86.c int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
vcpu             1151 arch/x86/kvm/x86.c 	size_t size = ARRAY_SIZE(vcpu->arch.db);
vcpu             1155 arch/x86/kvm/x86.c 		*val = vcpu->arch.db[array_index_nospec(dr, size)];
vcpu             1160 arch/x86/kvm/x86.c 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
vcpu             1161 arch/x86/kvm/x86.c 			*val = vcpu->arch.dr6;
vcpu             1163 arch/x86/kvm/x86.c 			*val = kvm_x86_ops->get_dr6(vcpu);
vcpu             1168 arch/x86/kvm/x86.c 		*val = vcpu->arch.dr7;
vcpu             1175 arch/x86/kvm/x86.c bool kvm_rdpmc(struct kvm_vcpu *vcpu)
vcpu             1177 arch/x86/kvm/x86.c 	u32 ecx = kvm_rcx_read(vcpu);
vcpu             1181 arch/x86/kvm/x86.c 	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
vcpu             1184 arch/x86/kvm/x86.c 	kvm_rax_write(vcpu, (u32)data);
vcpu             1185 arch/x86/kvm/x86.c 	kvm_rdx_write(vcpu, data >> 32);
vcpu             1416 arch/x86/kvm/x86.c static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
vcpu             1431 arch/x86/kvm/x86.c static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu             1433 arch/x86/kvm/x86.c 	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
vcpu             1436 arch/x86/kvm/x86.c 	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
vcpu             1440 arch/x86/kvm/x86.c 	    !guest_cpuid_has(vcpu, X86_FEATURE_LM))
vcpu             1443 arch/x86/kvm/x86.c 	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
vcpu             1449 arch/x86/kvm/x86.c bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
vcpu             1454 arch/x86/kvm/x86.c 	return __kvm_valid_efer(vcpu, efer);
vcpu             1458 arch/x86/kvm/x86.c static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             1460 arch/x86/kvm/x86.c 	u64 old_efer = vcpu->arch.efer;
vcpu             1467 arch/x86/kvm/x86.c 		if (!__kvm_valid_efer(vcpu, efer))
vcpu             1470 arch/x86/kvm/x86.c 		if (is_paging(vcpu) &&
vcpu             1471 arch/x86/kvm/x86.c 		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
vcpu             1476 arch/x86/kvm/x86.c 	efer |= vcpu->arch.efer & EFER_LMA;
vcpu             1478 arch/x86/kvm/x86.c 	kvm_x86_ops->set_efer(vcpu, efer);
vcpu             1482 arch/x86/kvm/x86.c 		kvm_mmu_reset_context(vcpu);
vcpu             1499 arch/x86/kvm/x86.c static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
vcpu             1510 arch/x86/kvm/x86.c 		if (is_noncanonical_address(data, vcpu))
vcpu             1527 arch/x86/kvm/x86.c 		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
vcpu             1534 arch/x86/kvm/x86.c 	return kvm_x86_ops->set_msr(vcpu, &msr);
vcpu             1543 arch/x86/kvm/x86.c static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
vcpu             1552 arch/x86/kvm/x86.c 	ret = kvm_x86_ops->get_msr(vcpu, &msr);
vcpu             1558 arch/x86/kvm/x86.c int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
vcpu             1560 arch/x86/kvm/x86.c 	return __kvm_get_msr(vcpu, index, data, false);
vcpu             1564 arch/x86/kvm/x86.c int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
vcpu             1566 arch/x86/kvm/x86.c 	return __kvm_set_msr(vcpu, index, data, false);
vcpu             1570 arch/x86/kvm/x86.c int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
vcpu             1572 arch/x86/kvm/x86.c 	u32 ecx = kvm_rcx_read(vcpu);
vcpu             1575 arch/x86/kvm/x86.c 	if (kvm_get_msr(vcpu, ecx, &data)) {
vcpu             1577 arch/x86/kvm/x86.c 		kvm_inject_gp(vcpu, 0);
vcpu             1583 arch/x86/kvm/x86.c 	kvm_rax_write(vcpu, data & -1u);
vcpu             1584 arch/x86/kvm/x86.c 	kvm_rdx_write(vcpu, (data >> 32) & -1u);
vcpu             1585 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             1589 arch/x86/kvm/x86.c int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
vcpu             1591 arch/x86/kvm/x86.c 	u32 ecx = kvm_rcx_read(vcpu);
vcpu             1592 arch/x86/kvm/x86.c 	u64 data = kvm_read_edx_eax(vcpu);
vcpu             1594 arch/x86/kvm/x86.c 	if (kvm_set_msr(vcpu, ecx, data)) {
vcpu             1596 arch/x86/kvm/x86.c 		kvm_inject_gp(vcpu, 0);
vcpu             1601 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             1608 arch/x86/kvm/x86.c static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
vcpu             1610 arch/x86/kvm/x86.c 	return __kvm_get_msr(vcpu, index, data, true);
vcpu             1613 arch/x86/kvm/x86.c static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
vcpu             1615 arch/x86/kvm/x86.c 	return __kvm_set_msr(vcpu, index, *data, true);
vcpu             1662 arch/x86/kvm/x86.c void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
vcpu             1664 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
vcpu             1665 arch/x86/kvm/x86.c 	kvm_vcpu_kick(vcpu);
vcpu             1760 arch/x86/kvm/x86.c static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
vcpu             1766 arch/x86/kvm/x86.c 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
vcpu             1773 arch/x86/kvm/x86.c 			vcpu->arch.tsc_catchup = 1;
vcpu             1774 arch/x86/kvm/x86.c 			vcpu->arch.tsc_always_catchup = 1;
vcpu             1792 arch/x86/kvm/x86.c 	vcpu->arch.tsc_scaling_ratio = ratio;
vcpu             1796 arch/x86/kvm/x86.c static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
vcpu             1804 arch/x86/kvm/x86.c 		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
vcpu             1810 arch/x86/kvm/x86.c 			   &vcpu->arch.virtual_tsc_shift,
vcpu             1811 arch/x86/kvm/x86.c 			   &vcpu->arch.virtual_tsc_mult);
vcpu             1812 arch/x86/kvm/x86.c 	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
vcpu             1826 arch/x86/kvm/x86.c 	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
vcpu             1829 arch/x86/kvm/x86.c static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
vcpu             1831 arch/x86/kvm/x86.c 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
vcpu             1832 arch/x86/kvm/x86.c 				      vcpu->arch.virtual_tsc_mult,
vcpu             1833 arch/x86/kvm/x86.c 				      vcpu->arch.virtual_tsc_shift);
vcpu             1834 arch/x86/kvm/x86.c 	tsc += vcpu->arch.this_tsc_write;
vcpu             1843 arch/x86/kvm/x86.c static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
vcpu             1847 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &vcpu->kvm->arch;
vcpu             1851 arch/x86/kvm/x86.c 			 atomic_read(&vcpu->kvm->online_vcpus));
vcpu             1863 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
vcpu             1865 arch/x86/kvm/x86.c 	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
vcpu             1866 arch/x86/kvm/x86.c 			    atomic_read(&vcpu->kvm->online_vcpus),
vcpu             1871 arch/x86/kvm/x86.c static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
vcpu             1873 arch/x86/kvm/x86.c 	u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
vcpu             1874 arch/x86/kvm/x86.c 	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
vcpu             1892 arch/x86/kvm/x86.c u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
vcpu             1895 arch/x86/kvm/x86.c 	u64 ratio = vcpu->arch.tsc_scaling_ratio;
vcpu             1904 arch/x86/kvm/x86.c static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
vcpu             1908 arch/x86/kvm/x86.c 	tsc = kvm_scale_tsc(vcpu, rdtsc());
vcpu             1913 arch/x86/kvm/x86.c u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
vcpu             1915 arch/x86/kvm/x86.c 	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
vcpu             1917 arch/x86/kvm/x86.c 	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
vcpu             1921 arch/x86/kvm/x86.c static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vcpu             1923 arch/x86/kvm/x86.c 	vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
vcpu             1939 arch/x86/kvm/x86.c void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
vcpu             1941 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1950 arch/x86/kvm/x86.c 	offset = kvm_compute_tsc_offset(vcpu, data);
vcpu             1954 arch/x86/kvm/x86.c 	if (vcpu->arch.virtual_tsc_khz) {
vcpu             1964 arch/x86/kvm/x86.c 						nsec_to_cycles(vcpu, elapsed);
vcpu             1965 arch/x86/kvm/x86.c 			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
vcpu             1983 arch/x86/kvm/x86.c 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
vcpu             1987 arch/x86/kvm/x86.c 			u64 delta = nsec_to_cycles(vcpu, elapsed);
vcpu             1989 arch/x86/kvm/x86.c 			offset = kvm_compute_tsc_offset(vcpu, data);
vcpu             1992 arch/x86/kvm/x86.c 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
vcpu             2016 arch/x86/kvm/x86.c 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
vcpu             2018 arch/x86/kvm/x86.c 	vcpu->arch.last_guest_tsc = data;
vcpu             2021 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
vcpu             2022 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu             2023 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
vcpu             2025 arch/x86/kvm/x86.c 	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
vcpu             2026 arch/x86/kvm/x86.c 		update_ia32_tsc_adjust_msr(vcpu, offset);
vcpu             2028 arch/x86/kvm/x86.c 	kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu             2038 arch/x86/kvm/x86.c 	kvm_track_tsc_matching(vcpu);
vcpu             2044 arch/x86/kvm/x86.c static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
vcpu             2047 arch/x86/kvm/x86.c 	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
vcpu             2048 arch/x86/kvm/x86.c 	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
vcpu             2051 arch/x86/kvm/x86.c static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
vcpu             2053 arch/x86/kvm/x86.c 	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
vcpu             2055 arch/x86/kvm/x86.c 	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
vcpu             2056 arch/x86/kvm/x86.c 	adjust_tsc_offset_guest(vcpu, adjustment);
vcpu             2261 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             2269 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             2270 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             2273 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             2274 arch/x86/kvm/x86.c 		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
vcpu             2314 arch/x86/kvm/x86.c 	struct kvm_vcpu_arch *vcpu = &v->arch;
vcpu             2317 arch/x86/kvm/x86.c 	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
vcpu             2340 arch/x86/kvm/x86.c 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
vcpu             2341 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
vcpu             2342 arch/x86/kvm/x86.c 				&vcpu->hv_clock,
vcpu             2343 arch/x86/kvm/x86.c 				sizeof(vcpu->hv_clock.version));
vcpu             2348 arch/x86/kvm/x86.c 	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
vcpu             2350 arch/x86/kvm/x86.c 	if (vcpu->pvclock_set_guest_stopped_request) {
vcpu             2351 arch/x86/kvm/x86.c 		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
vcpu             2352 arch/x86/kvm/x86.c 		vcpu->pvclock_set_guest_stopped_request = false;
vcpu             2355 arch/x86/kvm/x86.c 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
vcpu             2357 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
vcpu             2358 arch/x86/kvm/x86.c 				&vcpu->hv_clock,
vcpu             2359 arch/x86/kvm/x86.c 				sizeof(vcpu->hv_clock));
vcpu             2363 arch/x86/kvm/x86.c 	vcpu->hv_clock.version++;
vcpu             2364 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
vcpu             2365 arch/x86/kvm/x86.c 				&vcpu->hv_clock,
vcpu             2366 arch/x86/kvm/x86.c 				sizeof(vcpu->hv_clock.version));
vcpu             2372 arch/x86/kvm/x86.c 	struct kvm_vcpu_arch *vcpu = &v->arch;
vcpu             2419 arch/x86/kvm/x86.c 	if (vcpu->tsc_catchup) {
vcpu             2434 arch/x86/kvm/x86.c 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
vcpu             2436 arch/x86/kvm/x86.c 				   &vcpu->hv_clock.tsc_shift,
vcpu             2437 arch/x86/kvm/x86.c 				   &vcpu->hv_clock.tsc_to_system_mul);
vcpu             2438 arch/x86/kvm/x86.c 		vcpu->hw_tsc_khz = tgt_tsc_khz;
vcpu             2441 arch/x86/kvm/x86.c 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu             2442 arch/x86/kvm/x86.c 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu             2443 arch/x86/kvm/x86.c 	vcpu->last_guest_tsc = tsc_timestamp;
vcpu             2450 arch/x86/kvm/x86.c 	vcpu->hv_clock.flags = pvclock_flags;
vcpu             2452 arch/x86/kvm/x86.c 	if (vcpu->pv_time_enabled)
vcpu             2455 arch/x86/kvm/x86.c 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
vcpu             2482 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             2484 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             2485 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             2486 arch/x86/kvm/x86.c 		kvm_vcpu_kick(vcpu);
vcpu             2519 arch/x86/kvm/x86.c static bool can_set_mci_status(struct kvm_vcpu *vcpu)
vcpu             2522 arch/x86/kvm/x86.c 	if (guest_cpuid_is_amd(vcpu))
vcpu             2523 arch/x86/kvm/x86.c 		return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
vcpu             2528 arch/x86/kvm/x86.c static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             2530 arch/x86/kvm/x86.c 	u64 mcg_cap = vcpu->arch.mcg_cap;
vcpu             2537 arch/x86/kvm/x86.c 		vcpu->arch.mcg_status = data;
vcpu             2545 arch/x86/kvm/x86.c 		vcpu->arch.mcg_ctl = data;
vcpu             2566 arch/x86/kvm/x86.c 				if (!can_set_mci_status(vcpu))
vcpu             2570 arch/x86/kvm/x86.c 			vcpu->arch.mce_banks[offset] = data;
vcpu             2578 arch/x86/kvm/x86.c static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
vcpu             2580 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
vcpu             2581 arch/x86/kvm/x86.c 	int lm = is_long_mode(vcpu);
vcpu             2600 arch/x86/kvm/x86.c 	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
vcpu             2609 arch/x86/kvm/x86.c static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
vcpu             2617 arch/x86/kvm/x86.c 	vcpu->arch.apf.msr_val = data;
vcpu             2620 arch/x86/kvm/x86.c 		kvm_clear_async_pf_completion_queue(vcpu);
vcpu             2621 arch/x86/kvm/x86.c 		kvm_async_pf_hash_reset(vcpu);
vcpu             2625 arch/x86/kvm/x86.c 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
vcpu             2629 arch/x86/kvm/x86.c 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
vcpu             2630 arch/x86/kvm/x86.c 	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
vcpu             2631 arch/x86/kvm/x86.c 	kvm_async_pf_wakeup_all(vcpu);
vcpu             2635 arch/x86/kvm/x86.c static void kvmclock_reset(struct kvm_vcpu *vcpu)
vcpu             2637 arch/x86/kvm/x86.c 	vcpu->arch.pv_time_enabled = false;
vcpu             2638 arch/x86/kvm/x86.c 	vcpu->arch.time = 0;
vcpu             2641 arch/x86/kvm/x86.c static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
vcpu             2643 arch/x86/kvm/x86.c 	++vcpu->stat.tlb_flush;
vcpu             2644 arch/x86/kvm/x86.c 	kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
vcpu             2647 arch/x86/kvm/x86.c static void record_steal_time(struct kvm_vcpu *vcpu)
vcpu             2652 arch/x86/kvm/x86.c 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
vcpu             2656 arch/x86/kvm/x86.c 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
vcpu             2657 arch/x86/kvm/x86.c 			&map, &vcpu->arch.st.cache, false))
vcpu             2661 arch/x86/kvm/x86.c 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
vcpu             2667 arch/x86/kvm/x86.c 	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
vcpu             2670 arch/x86/kvm/x86.c 		kvm_vcpu_flush_tlb(vcpu, false);
vcpu             2672 arch/x86/kvm/x86.c 	vcpu->arch.st.preempted = 0;
vcpu             2682 arch/x86/kvm/x86.c 		vcpu->arch.st.last_steal;
vcpu             2683 arch/x86/kvm/x86.c 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu             2689 arch/x86/kvm/x86.c 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
vcpu             2692 arch/x86/kvm/x86.c int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             2710 arch/x86/kvm/x86.c 			vcpu->arch.microcode_version = data;
vcpu             2715 arch/x86/kvm/x86.c 		vcpu->arch.arch_capabilities = data;
vcpu             2718 arch/x86/kvm/x86.c 		return set_efer(vcpu, msr_info);
vcpu             2726 arch/x86/kvm/x86.c 			vcpu->arch.msr_hwcr = data;
vcpu             2728 arch/x86/kvm/x86.c 			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
vcpu             2735 arch/x86/kvm/x86.c 			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
vcpu             2749 arch/x86/kvm/x86.c 		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
vcpu             2753 arch/x86/kvm/x86.c 		return kvm_mtrr_set_msr(vcpu, msr, data);
vcpu             2755 arch/x86/kvm/x86.c 		return kvm_set_apic_base(vcpu, msr_info);
vcpu             2757 arch/x86/kvm/x86.c 		return kvm_x2apic_msr_write(vcpu, msr, data);
vcpu             2759 arch/x86/kvm/x86.c 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
vcpu             2762 arch/x86/kvm/x86.c 		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
vcpu             2764 arch/x86/kvm/x86.c 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
vcpu             2765 arch/x86/kvm/x86.c 				adjust_tsc_offset_guest(vcpu, adj);
vcpu             2767 arch/x86/kvm/x86.c 			vcpu->arch.ia32_tsc_adjust_msr = data;
vcpu             2771 arch/x86/kvm/x86.c 		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
vcpu             2772 arch/x86/kvm/x86.c 		    ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
vcpu             2773 arch/x86/kvm/x86.c 			if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
vcpu             2775 arch/x86/kvm/x86.c 			vcpu->arch.ia32_misc_enable_msr = data;
vcpu             2776 arch/x86/kvm/x86.c 			kvm_update_cpuid(vcpu);
vcpu             2778 arch/x86/kvm/x86.c 			vcpu->arch.ia32_misc_enable_msr = data;
vcpu             2784 arch/x86/kvm/x86.c 		vcpu->arch.smbase = data;
vcpu             2787 arch/x86/kvm/x86.c 		vcpu->arch.msr_ia32_power_ctl = data;
vcpu             2790 arch/x86/kvm/x86.c 		kvm_write_tsc(vcpu, msr_info);
vcpu             2795 arch/x86/kvm/x86.c 		vcpu->arch.smi_count = data;
vcpu             2799 arch/x86/kvm/x86.c 		vcpu->kvm->arch.wall_clock = data;
vcpu             2800 arch/x86/kvm/x86.c 		kvm_write_wall_clock(vcpu->kvm, data);
vcpu             2804 arch/x86/kvm/x86.c 		struct kvm_arch *ka = &vcpu->kvm->arch;
vcpu             2806 arch/x86/kvm/x86.c 		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
vcpu             2810 arch/x86/kvm/x86.c 				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
vcpu             2815 arch/x86/kvm/x86.c 		vcpu->arch.time = data;
vcpu             2816 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
vcpu             2819 arch/x86/kvm/x86.c 		vcpu->arch.pv_time_enabled = false;
vcpu             2823 arch/x86/kvm/x86.c 		if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
vcpu             2824 arch/x86/kvm/x86.c 		     &vcpu->arch.pv_time, data & ~1ULL,
vcpu             2826 arch/x86/kvm/x86.c 			vcpu->arch.pv_time_enabled = true;
vcpu             2831 arch/x86/kvm/x86.c 		if (kvm_pv_enable_async_pf(vcpu, data))
vcpu             2842 arch/x86/kvm/x86.c 		vcpu->arch.st.msr_val = data;
vcpu             2847 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
vcpu             2851 arch/x86/kvm/x86.c 		if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
vcpu             2860 arch/x86/kvm/x86.c 		vcpu->arch.msr_kvm_poll_control = data;
vcpu             2866 arch/x86/kvm/x86.c 		return set_msr_mce(vcpu, msr_info);
vcpu             2873 arch/x86/kvm/x86.c 		if (kvm_pmu_is_valid_msr(vcpu, msr))
vcpu             2874 arch/x86/kvm/x86.c 			return kvm_pmu_set_msr(vcpu, msr_info);
vcpu             2877 arch/x86/kvm/x86.c 			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
vcpu             2897 arch/x86/kvm/x86.c 		return kvm_hv_set_msr_common(vcpu, msr, data,
vcpu             2904 arch/x86/kvm/x86.c 			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
vcpu             2908 arch/x86/kvm/x86.c 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
vcpu             2910 arch/x86/kvm/x86.c 		vcpu->arch.osvw.length = data;
vcpu             2913 arch/x86/kvm/x86.c 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
vcpu             2915 arch/x86/kvm/x86.c 		vcpu->arch.osvw.status = data;
vcpu             2920 arch/x86/kvm/x86.c 		     cpuid_fault_enabled(vcpu)))
vcpu             2922 arch/x86/kvm/x86.c 		vcpu->arch.msr_platform_info = data;
vcpu             2927 arch/x86/kvm/x86.c 		     !supports_cpuid_fault(vcpu)))
vcpu             2929 arch/x86/kvm/x86.c 		vcpu->arch.msr_misc_features_enables = data;
vcpu             2932 arch/x86/kvm/x86.c 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
vcpu             2933 arch/x86/kvm/x86.c 			return xen_hvm_config(vcpu, data);
vcpu             2934 arch/x86/kvm/x86.c 		if (kvm_pmu_is_valid_msr(vcpu, msr))
vcpu             2935 arch/x86/kvm/x86.c 			return kvm_pmu_set_msr(vcpu, msr_info);
vcpu             2937 arch/x86/kvm/x86.c 			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
vcpu             2942 arch/x86/kvm/x86.c 				vcpu_unimpl(vcpu,
vcpu             2952 arch/x86/kvm/x86.c static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
vcpu             2955 arch/x86/kvm/x86.c 	u64 mcg_cap = vcpu->arch.mcg_cap;
vcpu             2964 arch/x86/kvm/x86.c 		data = vcpu->arch.mcg_cap;
vcpu             2969 arch/x86/kvm/x86.c 		data = vcpu->arch.mcg_ctl;
vcpu             2972 arch/x86/kvm/x86.c 		data = vcpu->arch.mcg_status;
vcpu             2981 arch/x86/kvm/x86.c 			data = vcpu->arch.mce_banks[offset];
vcpu             2990 arch/x86/kvm/x86.c int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu             3018 arch/x86/kvm/x86.c 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
vcpu             3019 arch/x86/kvm/x86.c 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
vcpu             3023 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.microcode_version;
vcpu             3027 arch/x86/kvm/x86.c 		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
vcpu             3029 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.arch_capabilities;
vcpu             3032 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.msr_ia32_power_ctl;
vcpu             3035 arch/x86/kvm/x86.c 		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
vcpu             3039 arch/x86/kvm/x86.c 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
vcpu             3058 arch/x86/kvm/x86.c 		msr_info->data = kvm_get_apic_base(vcpu);
vcpu             3061 arch/x86/kvm/x86.c 		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
vcpu             3064 arch/x86/kvm/x86.c 		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
vcpu             3067 arch/x86/kvm/x86.c 		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
vcpu             3070 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
vcpu             3075 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.smbase;
vcpu             3078 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.smi_count;
vcpu             3087 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.efer;
vcpu             3091 arch/x86/kvm/x86.c 		msr_info->data = vcpu->kvm->arch.wall_clock;
vcpu             3095 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.time;
vcpu             3098 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.apf.msr_val;
vcpu             3101 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.st.msr_val;
vcpu             3104 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.pv_eoi.msr_val;
vcpu             3107 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.msr_kvm_poll_control;
vcpu             3115 arch/x86/kvm/x86.c 		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
vcpu             3136 arch/x86/kvm/x86.c 		return kvm_hv_get_msr_common(vcpu,
vcpu             3154 arch/x86/kvm/x86.c 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
vcpu             3156 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.osvw.length;
vcpu             3159 arch/x86/kvm/x86.c 		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
vcpu             3161 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.osvw.status;
vcpu             3165 arch/x86/kvm/x86.c 		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
vcpu             3167 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.msr_platform_info;
vcpu             3170 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.msr_misc_features_enables;
vcpu             3173 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.msr_hwcr;
vcpu             3176 arch/x86/kvm/x86.c 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
vcpu             3177 arch/x86/kvm/x86.c 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
vcpu             3179 arch/x86/kvm/x86.c 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
vcpu             3184 arch/x86/kvm/x86.c 				vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
vcpu             3199 arch/x86/kvm/x86.c static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
vcpu             3201 arch/x86/kvm/x86.c 		    int (*do_msr)(struct kvm_vcpu *vcpu,
vcpu             3207 arch/x86/kvm/x86.c 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
vcpu             3218 arch/x86/kvm/x86.c static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
vcpu             3219 arch/x86/kvm/x86.c 		  int (*do_msr)(struct kvm_vcpu *vcpu,
vcpu             3243 arch/x86/kvm/x86.c 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
vcpu             3488 arch/x86/kvm/x86.c static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
vcpu             3490 arch/x86/kvm/x86.c 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
vcpu             3493 arch/x86/kvm/x86.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu             3496 arch/x86/kvm/x86.c 	if (need_emulate_wbinvd(vcpu)) {
vcpu             3498 arch/x86/kvm/x86.c 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
vcpu             3499 arch/x86/kvm/x86.c 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
vcpu             3500 arch/x86/kvm/x86.c 			smp_call_function_single(vcpu->cpu,
vcpu             3504 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_load(vcpu, cpu);
vcpu             3507 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
vcpu             3508 arch/x86/kvm/x86.c 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
vcpu             3509 arch/x86/kvm/x86.c 		vcpu->arch.tsc_offset_adjustment = 0;
vcpu             3510 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             3513 arch/x86/kvm/x86.c 	if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
vcpu             3514 arch/x86/kvm/x86.c 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
vcpu             3515 arch/x86/kvm/x86.c 				rdtsc() - vcpu->arch.last_host_tsc;
vcpu             3520 arch/x86/kvm/x86.c 			u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu             3521 arch/x86/kvm/x86.c 						vcpu->arch.last_guest_tsc);
vcpu             3522 arch/x86/kvm/x86.c 			kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu             3523 arch/x86/kvm/x86.c 			vcpu->arch.tsc_catchup = 1;
vcpu             3526 arch/x86/kvm/x86.c 		if (kvm_lapic_hv_timer_in_use(vcpu))
vcpu             3527 arch/x86/kvm/x86.c 			kvm_lapic_restart_hv_timer(vcpu);
vcpu             3533 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
vcpu             3534 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
vcpu             3535 arch/x86/kvm/x86.c 		if (vcpu->cpu != cpu)
vcpu             3536 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
vcpu             3537 arch/x86/kvm/x86.c 		vcpu->cpu = cpu;
vcpu             3540 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
vcpu             3543 arch/x86/kvm/x86.c static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
vcpu             3548 arch/x86/kvm/x86.c 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
vcpu             3551 arch/x86/kvm/x86.c 	if (vcpu->arch.st.preempted)
vcpu             3554 arch/x86/kvm/x86.c 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
vcpu             3555 arch/x86/kvm/x86.c 			&vcpu->arch.st.cache, true))
vcpu             3559 arch/x86/kvm/x86.c 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
vcpu             3561 arch/x86/kvm/x86.c 	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
vcpu             3563 arch/x86/kvm/x86.c 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
vcpu             3566 arch/x86/kvm/x86.c void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu             3570 arch/x86/kvm/x86.c 	if (vcpu->preempted)
vcpu             3571 arch/x86/kvm/x86.c 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
vcpu             3586 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             3587 arch/x86/kvm/x86.c 	kvm_steal_time_set_preempted(vcpu);
vcpu             3588 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             3590 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_put(vcpu);
vcpu             3591 arch/x86/kvm/x86.c 	vcpu->arch.last_host_tsc = rdtsc();
vcpu             3600 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
vcpu             3603 arch/x86/kvm/x86.c 	if (vcpu->arch.apicv_active)
vcpu             3604 arch/x86/kvm/x86.c 		kvm_x86_ops->sync_pir_to_irr(vcpu);
vcpu             3606 arch/x86/kvm/x86.c 	return kvm_apic_get_state(vcpu, s);
vcpu             3609 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
vcpu             3614 arch/x86/kvm/x86.c 	r = kvm_apic_set_state(vcpu, s);
vcpu             3617 arch/x86/kvm/x86.c 	update_cr8_intercept(vcpu);
vcpu             3622 arch/x86/kvm/x86.c static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
vcpu             3624 arch/x86/kvm/x86.c 	return (!lapic_in_kernel(vcpu) ||
vcpu             3625 arch/x86/kvm/x86.c 		kvm_apic_accept_pic_intr(vcpu));
vcpu             3634 arch/x86/kvm/x86.c static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
vcpu             3636 arch/x86/kvm/x86.c 	return kvm_arch_interrupt_allowed(vcpu) &&
vcpu             3637 arch/x86/kvm/x86.c 		!kvm_cpu_has_interrupt(vcpu) &&
vcpu             3638 arch/x86/kvm/x86.c 		!kvm_event_needs_reinjection(vcpu) &&
vcpu             3639 arch/x86/kvm/x86.c 		kvm_cpu_accept_dm_intr(vcpu);
vcpu             3642 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
vcpu             3648 arch/x86/kvm/x86.c 	if (!irqchip_in_kernel(vcpu->kvm)) {
vcpu             3649 arch/x86/kvm/x86.c 		kvm_queue_interrupt(vcpu, irq->irq, false);
vcpu             3650 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             3658 arch/x86/kvm/x86.c 	if (pic_in_kernel(vcpu->kvm))
vcpu             3661 arch/x86/kvm/x86.c 	if (vcpu->arch.pending_external_vector != -1)
vcpu             3664 arch/x86/kvm/x86.c 	vcpu->arch.pending_external_vector = irq->irq;
vcpu             3665 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             3669 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
vcpu             3671 arch/x86/kvm/x86.c 	kvm_inject_nmi(vcpu);
vcpu             3676 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
vcpu             3678 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_SMI, vcpu);
vcpu             3683 arch/x86/kvm/x86.c static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
vcpu             3688 arch/x86/kvm/x86.c 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
vcpu             3692 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
vcpu             3704 arch/x86/kvm/x86.c 	vcpu->arch.mcg_cap = mcg_cap;
vcpu             3707 arch/x86/kvm/x86.c 		vcpu->arch.mcg_ctl = ~(u64)0;
vcpu             3710 arch/x86/kvm/x86.c 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
vcpu             3712 arch/x86/kvm/x86.c 	kvm_x86_ops->setup_mce(vcpu);
vcpu             3717 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
vcpu             3720 arch/x86/kvm/x86.c 	u64 mcg_cap = vcpu->arch.mcg_cap;
vcpu             3722 arch/x86/kvm/x86.c 	u64 *banks = vcpu->arch.mce_banks;
vcpu             3731 arch/x86/kvm/x86.c 	    vcpu->arch.mcg_ctl != ~(u64)0)
vcpu             3741 arch/x86/kvm/x86.c 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
vcpu             3742 arch/x86/kvm/x86.c 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
vcpu             3743 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu             3750 arch/x86/kvm/x86.c 		vcpu->arch.mcg_status = mce->mcg_status;
vcpu             3752 arch/x86/kvm/x86.c 		kvm_queue_exception(vcpu, MC_VECTOR);
vcpu             3765 arch/x86/kvm/x86.c static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
vcpu             3768 arch/x86/kvm/x86.c 	process_nmi(vcpu);
vcpu             3776 arch/x86/kvm/x86.c 	if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
vcpu             3780 arch/x86/kvm/x86.c 		events->exception.injected = vcpu->arch.exception.injected;
vcpu             3781 arch/x86/kvm/x86.c 		events->exception.pending = vcpu->arch.exception.pending;
vcpu             3787 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled)
vcpu             3789 arch/x86/kvm/x86.c 				vcpu->arch.exception.pending;
vcpu             3791 arch/x86/kvm/x86.c 	events->exception.nr = vcpu->arch.exception.nr;
vcpu             3792 arch/x86/kvm/x86.c 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
vcpu             3793 arch/x86/kvm/x86.c 	events->exception.error_code = vcpu->arch.exception.error_code;
vcpu             3794 arch/x86/kvm/x86.c 	events->exception_has_payload = vcpu->arch.exception.has_payload;
vcpu             3795 arch/x86/kvm/x86.c 	events->exception_payload = vcpu->arch.exception.payload;
vcpu             3798 arch/x86/kvm/x86.c 		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
vcpu             3799 arch/x86/kvm/x86.c 	events->interrupt.nr = vcpu->arch.interrupt.nr;
vcpu             3801 arch/x86/kvm/x86.c 	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
vcpu             3803 arch/x86/kvm/x86.c 	events->nmi.injected = vcpu->arch.nmi_injected;
vcpu             3804 arch/x86/kvm/x86.c 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
vcpu             3805 arch/x86/kvm/x86.c 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
vcpu             3810 arch/x86/kvm/x86.c 	events->smi.smm = is_smm(vcpu);
vcpu             3811 arch/x86/kvm/x86.c 	events->smi.pending = vcpu->arch.smi_pending;
vcpu             3813 arch/x86/kvm/x86.c 		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
vcpu             3814 arch/x86/kvm/x86.c 	events->smi.latched_init = kvm_lapic_latched_init(vcpu);
vcpu             3819 arch/x86/kvm/x86.c 	if (vcpu->kvm->arch.exception_payload_enabled)
vcpu             3825 arch/x86/kvm/x86.c static void kvm_smm_changed(struct kvm_vcpu *vcpu);
vcpu             3827 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu             3838 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled)
vcpu             3856 arch/x86/kvm/x86.c 	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
vcpu             3859 arch/x86/kvm/x86.c 	process_nmi(vcpu);
vcpu             3860 arch/x86/kvm/x86.c 	vcpu->arch.exception.injected = events->exception.injected;
vcpu             3861 arch/x86/kvm/x86.c 	vcpu->arch.exception.pending = events->exception.pending;
vcpu             3862 arch/x86/kvm/x86.c 	vcpu->arch.exception.nr = events->exception.nr;
vcpu             3863 arch/x86/kvm/x86.c 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu             3864 arch/x86/kvm/x86.c 	vcpu->arch.exception.error_code = events->exception.error_code;
vcpu             3865 arch/x86/kvm/x86.c 	vcpu->arch.exception.has_payload = events->exception_has_payload;
vcpu             3866 arch/x86/kvm/x86.c 	vcpu->arch.exception.payload = events->exception_payload;
vcpu             3868 arch/x86/kvm/x86.c 	vcpu->arch.interrupt.injected = events->interrupt.injected;
vcpu             3869 arch/x86/kvm/x86.c 	vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu             3870 arch/x86/kvm/x86.c 	vcpu->arch.interrupt.soft = events->interrupt.soft;
vcpu             3872 arch/x86/kvm/x86.c 		kvm_x86_ops->set_interrupt_shadow(vcpu,
vcpu             3875 arch/x86/kvm/x86.c 	vcpu->arch.nmi_injected = events->nmi.injected;
vcpu             3877 arch/x86/kvm/x86.c 		vcpu->arch.nmi_pending = events->nmi.pending;
vcpu             3878 arch/x86/kvm/x86.c 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
vcpu             3881 arch/x86/kvm/x86.c 	    lapic_in_kernel(vcpu))
vcpu             3882 arch/x86/kvm/x86.c 		vcpu->arch.apic->sipi_vector = events->sipi_vector;
vcpu             3885 arch/x86/kvm/x86.c 		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
vcpu             3887 arch/x86/kvm/x86.c 				vcpu->arch.hflags |= HF_SMM_MASK;
vcpu             3889 arch/x86/kvm/x86.c 				vcpu->arch.hflags &= ~HF_SMM_MASK;
vcpu             3890 arch/x86/kvm/x86.c 			kvm_smm_changed(vcpu);
vcpu             3893 arch/x86/kvm/x86.c 		vcpu->arch.smi_pending = events->smi.pending;
vcpu             3897 arch/x86/kvm/x86.c 				vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
vcpu             3899 arch/x86/kvm/x86.c 				vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
vcpu             3900 arch/x86/kvm/x86.c 			if (lapic_in_kernel(vcpu)) {
vcpu             3902 arch/x86/kvm/x86.c 					set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
vcpu             3904 arch/x86/kvm/x86.c 					clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
vcpu             3909 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             3914 arch/x86/kvm/x86.c static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
vcpu             3919 arch/x86/kvm/x86.c 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
vcpu             3920 arch/x86/kvm/x86.c 	kvm_get_dr(vcpu, 6, &val);
vcpu             3922 arch/x86/kvm/x86.c 	dbgregs->dr7 = vcpu->arch.dr7;
vcpu             3927 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
vcpu             3938 arch/x86/kvm/x86.c 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu             3939 arch/x86/kvm/x86.c 	kvm_update_dr0123(vcpu);
vcpu             3940 arch/x86/kvm/x86.c 	vcpu->arch.dr6 = dbgregs->dr6;
vcpu             3941 arch/x86/kvm/x86.c 	kvm_update_dr6(vcpu);
vcpu             3942 arch/x86/kvm/x86.c 	vcpu->arch.dr7 = dbgregs->dr7;
vcpu             3943 arch/x86/kvm/x86.c 	kvm_update_dr7(vcpu);
vcpu             3950 arch/x86/kvm/x86.c static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
vcpu             3952 arch/x86/kvm/x86.c 	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
vcpu             3963 arch/x86/kvm/x86.c 	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
vcpu             3981 arch/x86/kvm/x86.c 				memcpy(dest + offset, &vcpu->arch.pkru,
vcpu             3982 arch/x86/kvm/x86.c 				       sizeof(vcpu->arch.pkru));
vcpu             3992 arch/x86/kvm/x86.c static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
vcpu             3994 arch/x86/kvm/x86.c 	struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
vcpu             4024 arch/x86/kvm/x86.c 				memcpy(&vcpu->arch.pkru, src + offset,
vcpu             4025 arch/x86/kvm/x86.c 				       sizeof(vcpu->arch.pkru));
vcpu             4034 arch/x86/kvm/x86.c static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
vcpu             4039 arch/x86/kvm/x86.c 		fill_xsave((u8 *) guest_xsave->region, vcpu);
vcpu             4042 arch/x86/kvm/x86.c 			&vcpu->arch.guest_fpu->state.fxsave,
vcpu             4051 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
vcpu             4067 arch/x86/kvm/x86.c 		load_xsave(vcpu, (u8 *)guest_xsave->region);
vcpu             4072 arch/x86/kvm/x86.c 		memcpy(&vcpu->arch.guest_fpu->state.fxsave,
vcpu             4078 arch/x86/kvm/x86.c static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
vcpu             4089 arch/x86/kvm/x86.c 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
vcpu             4092 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
vcpu             4106 arch/x86/kvm/x86.c 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
vcpu             4121 arch/x86/kvm/x86.c static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
vcpu             4123 arch/x86/kvm/x86.c 	if (!vcpu->arch.pv_time_enabled)
vcpu             4125 arch/x86/kvm/x86.c 	vcpu->arch.pvclock_set_guest_stopped_request = true;
vcpu             4126 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             4130 arch/x86/kvm/x86.c static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
vcpu             4147 arch/x86/kvm/x86.c 		if (!irqchip_in_kernel(vcpu->kvm))
vcpu             4149 arch/x86/kvm/x86.c 		return kvm_hv_activate_synic(vcpu, cap->cap ==
vcpu             4154 arch/x86/kvm/x86.c 		r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
vcpu             4166 arch/x86/kvm/x86.c 		return kvm_x86_ops->enable_direct_tlbflush(vcpu);
vcpu             4176 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             4186 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             4192 arch/x86/kvm/x86.c 		if (!lapic_in_kernel(vcpu))
vcpu             4200 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
vcpu             4211 arch/x86/kvm/x86.c 		if (!lapic_in_kernel(vcpu))
vcpu             4219 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
vcpu             4228 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
vcpu             4232 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_nmi(vcpu);
vcpu             4236 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_smi(vcpu);
vcpu             4246 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
vcpu             4256 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
vcpu             4267 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
vcpu             4278 arch/x86/kvm/x86.c 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4279 arch/x86/kvm/x86.c 		r = msr_io(vcpu, argp, do_get_msr, 1);
vcpu             4280 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             4284 arch/x86/kvm/x86.c 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4285 arch/x86/kvm/x86.c 		r = msr_io(vcpu, argp, do_set_msr, 0);
vcpu             4286 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             4295 arch/x86/kvm/x86.c 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
vcpu             4309 arch/x86/kvm/x86.c 		if (!lapic_in_kernel(vcpu))
vcpu             4314 arch/x86/kvm/x86.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4315 arch/x86/kvm/x86.c 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
vcpu             4316 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             4325 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
vcpu             4334 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
vcpu             4340 arch/x86/kvm/x86.c 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
vcpu             4355 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
vcpu             4361 arch/x86/kvm/x86.c 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
vcpu             4378 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
vcpu             4387 arch/x86/kvm/x86.c 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
vcpu             4402 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
vcpu             4411 arch/x86/kvm/x86.c 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
vcpu             4427 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
vcpu             4442 arch/x86/kvm/x86.c 		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
vcpu             4448 arch/x86/kvm/x86.c 		r = vcpu->arch.virtual_tsc_khz;
vcpu             4452 arch/x86/kvm/x86.c 		r = kvm_set_guest_paused(vcpu);
vcpu             4461 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu             4477 arch/x86/kvm/x86.c 		r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
vcpu             4520 arch/x86/kvm/x86.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             4521 arch/x86/kvm/x86.c 		r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
vcpu             4522 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             4533 arch/x86/kvm/x86.c 		r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid,
vcpu             4550 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             4554 arch/x86/kvm/x86.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vcpu             5287 arch/x86/kvm/x86.c static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
vcpu             5295 arch/x86/kvm/x86.c 		if (!(lapic_in_kernel(vcpu) &&
vcpu             5296 arch/x86/kvm/x86.c 		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
vcpu             5297 arch/x86/kvm/x86.c 		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
vcpu             5308 arch/x86/kvm/x86.c static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
vcpu             5315 arch/x86/kvm/x86.c 		if (!(lapic_in_kernel(vcpu) &&
vcpu             5316 arch/x86/kvm/x86.c 		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
vcpu             5318 arch/x86/kvm/x86.c 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
vcpu             5330 arch/x86/kvm/x86.c static void kvm_set_segment(struct kvm_vcpu *vcpu,
vcpu             5333 arch/x86/kvm/x86.c 	kvm_x86_ops->set_segment(vcpu, var, seg);
vcpu             5336 arch/x86/kvm/x86.c void kvm_get_segment(struct kvm_vcpu *vcpu,
vcpu             5339 arch/x86/kvm/x86.c 	kvm_x86_ops->get_segment(vcpu, var, seg);
vcpu             5342 arch/x86/kvm/x86.c gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
vcpu             5347 arch/x86/kvm/x86.c 	BUG_ON(!mmu_is_nested(vcpu));
vcpu             5351 arch/x86/kvm/x86.c 	t_gpa  = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
vcpu             5356 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             5359 arch/x86/kvm/x86.c 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
vcpu             5360 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
vcpu             5363 arch/x86/kvm/x86.c  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             5366 arch/x86/kvm/x86.c 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
vcpu             5368 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
vcpu             5371 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             5374 arch/x86/kvm/x86.c 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
vcpu             5376 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
vcpu             5380 arch/x86/kvm/x86.c gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
vcpu             5383 arch/x86/kvm/x86.c 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
vcpu             5387 arch/x86/kvm/x86.c 				      struct kvm_vcpu *vcpu, u32 access,
vcpu             5394 arch/x86/kvm/x86.c 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
vcpu             5402 arch/x86/kvm/x86.c 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
vcpu             5422 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5423 arch/x86/kvm/x86.c 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
vcpu             5428 arch/x86/kvm/x86.c 	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
vcpu             5436 arch/x86/kvm/x86.c 	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
vcpu             5444 arch/x86/kvm/x86.c int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
vcpu             5448 arch/x86/kvm/x86.c 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
vcpu             5457 arch/x86/kvm/x86.c 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
vcpu             5466 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5469 arch/x86/kvm/x86.c 	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
vcpu             5472 arch/x86/kvm/x86.c 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
vcpu             5478 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5479 arch/x86/kvm/x86.c 	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
vcpu             5485 arch/x86/kvm/x86.c 				      struct kvm_vcpu *vcpu, u32 access,
vcpu             5492 arch/x86/kvm/x86.c 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
vcpu             5501 arch/x86/kvm/x86.c 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
vcpu             5519 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5522 arch/x86/kvm/x86.c 	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
vcpu             5525 arch/x86/kvm/x86.c 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
vcpu             5529 arch/x86/kvm/x86.c int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
vcpu             5533 arch/x86/kvm/x86.c 	vcpu->arch.l1tf_flush_l1d = true;
vcpu             5542 arch/x86/kvm/x86.c 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
vcpu             5547 arch/x86/kvm/x86.c int handle_ud(struct kvm_vcpu *vcpu)
vcpu             5554 arch/x86/kvm/x86.c 	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
vcpu             5557 arch/x86/kvm/x86.c 		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
vcpu             5561 arch/x86/kvm/x86.c 	return kvm_emulate_instruction(vcpu, emul_type);
vcpu             5565 arch/x86/kvm/x86.c static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu             5572 arch/x86/kvm/x86.c 	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
vcpu             5580 arch/x86/kvm/x86.c static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
vcpu             5584 arch/x86/kvm/x86.c 	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
vcpu             5592 arch/x86/kvm/x86.c 	if (vcpu_match_mmio_gva(vcpu, gva)
vcpu             5593 arch/x86/kvm/x86.c 	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
vcpu             5594 arch/x86/kvm/x86.c 				 vcpu->arch.mmio_access, 0, access)) {
vcpu             5595 arch/x86/kvm/x86.c 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
vcpu             5601 arch/x86/kvm/x86.c 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
vcpu             5606 arch/x86/kvm/x86.c 	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
vcpu             5609 arch/x86/kvm/x86.c int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5614 arch/x86/kvm/x86.c 	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
vcpu             5617 arch/x86/kvm/x86.c 	kvm_page_track_write(vcpu, gpa, val, bytes);
vcpu             5622 arch/x86/kvm/x86.c 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
vcpu             5624 arch/x86/kvm/x86.c 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5626 arch/x86/kvm/x86.c 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5628 arch/x86/kvm/x86.c 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5633 arch/x86/kvm/x86.c static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
vcpu             5635 arch/x86/kvm/x86.c 	if (vcpu->mmio_read_completed) {
vcpu             5637 arch/x86/kvm/x86.c 			       vcpu->mmio_fragments[0].gpa, val);
vcpu             5638 arch/x86/kvm/x86.c 		vcpu->mmio_read_completed = 0;
vcpu             5645 arch/x86/kvm/x86.c static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5648 arch/x86/kvm/x86.c 	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
vcpu             5651 arch/x86/kvm/x86.c static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5654 arch/x86/kvm/x86.c 	return emulator_write_phys(vcpu, gpa, val, bytes);
vcpu             5657 arch/x86/kvm/x86.c static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
vcpu             5660 arch/x86/kvm/x86.c 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
vcpu             5663 arch/x86/kvm/x86.c static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5670 arch/x86/kvm/x86.c static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             5673 arch/x86/kvm/x86.c 	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
vcpu             5675 arch/x86/kvm/x86.c 	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
vcpu             5696 arch/x86/kvm/x86.c 				       struct kvm_vcpu *vcpu,
vcpu             5703 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             5712 arch/x86/kvm/x86.c 	if (vcpu->arch.gpa_available &&
vcpu             5714 arch/x86/kvm/x86.c 	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
vcpu             5715 arch/x86/kvm/x86.c 		gpa = vcpu->arch.gpa_val;
vcpu             5716 arch/x86/kvm/x86.c 		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
vcpu             5718 arch/x86/kvm/x86.c 		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
vcpu             5723 arch/x86/kvm/x86.c 	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
vcpu             5729 arch/x86/kvm/x86.c 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
vcpu             5737 arch/x86/kvm/x86.c 	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
vcpu             5738 arch/x86/kvm/x86.c 	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
vcpu             5751 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5756 arch/x86/kvm/x86.c 		  ops->read_write_prepare(vcpu, val, bytes))
vcpu             5759 arch/x86/kvm/x86.c 	vcpu->mmio_nr_fragments = 0;
vcpu             5767 arch/x86/kvm/x86.c 						 vcpu, ops);
vcpu             5779 arch/x86/kvm/x86.c 					 vcpu, ops);
vcpu             5783 arch/x86/kvm/x86.c 	if (!vcpu->mmio_nr_fragments)
vcpu             5786 arch/x86/kvm/x86.c 	gpa = vcpu->mmio_fragments[0].gpa;
vcpu             5788 arch/x86/kvm/x86.c 	vcpu->mmio_needed = 1;
vcpu             5789 arch/x86/kvm/x86.c 	vcpu->mmio_cur_fragment = 0;
vcpu             5791 arch/x86/kvm/x86.c 	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
vcpu             5792 arch/x86/kvm/x86.c 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
vcpu             5793 arch/x86/kvm/x86.c 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
vcpu             5794 arch/x86/kvm/x86.c 	vcpu->run->mmio.phys_addr = gpa;
vcpu             5796 arch/x86/kvm/x86.c 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
vcpu             5837 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5846 arch/x86/kvm/x86.c 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
vcpu             5855 arch/x86/kvm/x86.c 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
vcpu             5877 arch/x86/kvm/x86.c 	kvm_vcpu_unmap(vcpu, &map, true);
vcpu             5882 arch/x86/kvm/x86.c 	kvm_page_track_write(vcpu, gpa, new, bytes);
vcpu             5892 arch/x86/kvm/x86.c static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
vcpu             5896 arch/x86/kvm/x86.c 	for (i = 0; i < vcpu->arch.pio.count; i++) {
vcpu             5897 arch/x86/kvm/x86.c 		if (vcpu->arch.pio.in)
vcpu             5898 arch/x86/kvm/x86.c 			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu             5899 arch/x86/kvm/x86.c 					    vcpu->arch.pio.size, pd);
vcpu             5901 arch/x86/kvm/x86.c 			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
vcpu             5902 arch/x86/kvm/x86.c 					     vcpu->arch.pio.port, vcpu->arch.pio.size,
vcpu             5906 arch/x86/kvm/x86.c 		pd += vcpu->arch.pio.size;
vcpu             5911 arch/x86/kvm/x86.c static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
vcpu             5915 arch/x86/kvm/x86.c 	vcpu->arch.pio.port = port;
vcpu             5916 arch/x86/kvm/x86.c 	vcpu->arch.pio.in = in;
vcpu             5917 arch/x86/kvm/x86.c 	vcpu->arch.pio.count  = count;
vcpu             5918 arch/x86/kvm/x86.c 	vcpu->arch.pio.size = size;
vcpu             5920 arch/x86/kvm/x86.c 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
vcpu             5921 arch/x86/kvm/x86.c 		vcpu->arch.pio.count = 0;
vcpu             5925 arch/x86/kvm/x86.c 	vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu             5926 arch/x86/kvm/x86.c 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu             5927 arch/x86/kvm/x86.c 	vcpu->run->io.size = size;
vcpu             5928 arch/x86/kvm/x86.c 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
vcpu             5929 arch/x86/kvm/x86.c 	vcpu->run->io.count = count;
vcpu             5930 arch/x86/kvm/x86.c 	vcpu->run->io.port = port;
vcpu             5939 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5942 arch/x86/kvm/x86.c 	if (vcpu->arch.pio.count)
vcpu             5945 arch/x86/kvm/x86.c 	memset(vcpu->arch.pio_data, 0, size * count);
vcpu             5947 arch/x86/kvm/x86.c 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
vcpu             5950 arch/x86/kvm/x86.c 		memcpy(val, vcpu->arch.pio_data, size * count);
vcpu             5951 arch/x86/kvm/x86.c 		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
vcpu             5952 arch/x86/kvm/x86.c 		vcpu->arch.pio.count = 0;
vcpu             5963 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             5965 arch/x86/kvm/x86.c 	memcpy(vcpu->arch.pio_data, val, size * count);
vcpu             5966 arch/x86/kvm/x86.c 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
vcpu             5967 arch/x86/kvm/x86.c 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
vcpu             5970 arch/x86/kvm/x86.c static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
vcpu             5972 arch/x86/kvm/x86.c 	return kvm_x86_ops->get_segment_base(vcpu, seg);
vcpu             5980 arch/x86/kvm/x86.c static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
vcpu             5982 arch/x86/kvm/x86.c 	if (!need_emulate_wbinvd(vcpu))
vcpu             5988 arch/x86/kvm/x86.c 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
vcpu             5989 arch/x86/kvm/x86.c 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
vcpu             5992 arch/x86/kvm/x86.c 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
vcpu             5998 arch/x86/kvm/x86.c int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
vcpu             6000 arch/x86/kvm/x86.c 	kvm_emulate_wbinvd_noskip(vcpu);
vcpu             6001 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             6032 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6037 arch/x86/kvm/x86.c 		value = kvm_read_cr0(vcpu);
vcpu             6040 arch/x86/kvm/x86.c 		value = vcpu->arch.cr2;
vcpu             6043 arch/x86/kvm/x86.c 		value = kvm_read_cr3(vcpu);
vcpu             6046 arch/x86/kvm/x86.c 		value = kvm_read_cr4(vcpu);
vcpu             6049 arch/x86/kvm/x86.c 		value = kvm_get_cr8(vcpu);
vcpu             6061 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6066 arch/x86/kvm/x86.c 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
vcpu             6069 arch/x86/kvm/x86.c 		vcpu->arch.cr2 = val;
vcpu             6072 arch/x86/kvm/x86.c 		res = kvm_set_cr3(vcpu, val);
vcpu             6075 arch/x86/kvm/x86.c 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
vcpu             6078 arch/x86/kvm/x86.c 		res = kvm_set_cr8(vcpu, val);
vcpu             6159 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6181 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &var, seg);
vcpu             6199 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6201 arch/x86/kvm/x86.c 	return vcpu->arch.smbase;
vcpu             6206 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6208 arch/x86/kvm/x86.c 	vcpu->arch.smbase = smbase;
vcpu             6326 arch/x86/kvm/x86.c static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
vcpu             6328 arch/x86/kvm/x86.c 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
vcpu             6339 arch/x86/kvm/x86.c 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
vcpu             6341 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             6345 arch/x86/kvm/x86.c static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
vcpu             6347 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             6349 arch/x86/kvm/x86.c 		return kvm_propagate_fault(vcpu, &ctxt->exception);
vcpu             6352 arch/x86/kvm/x86.c 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
vcpu             6355 arch/x86/kvm/x86.c 		kvm_queue_exception(vcpu, ctxt->exception.vector);
vcpu             6359 arch/x86/kvm/x86.c static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
vcpu             6361 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             6364 arch/x86/kvm/x86.c 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu             6366 arch/x86/kvm/x86.c 	ctxt->eflags = kvm_get_rflags(vcpu);
vcpu             6369 arch/x86/kvm/x86.c 	ctxt->eip = kvm_rip_read(vcpu);
vcpu             6370 arch/x86/kvm/x86.c 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
vcpu             6372 arch/x86/kvm/x86.c 		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
vcpu             6380 arch/x86/kvm/x86.c 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
vcpu             6383 arch/x86/kvm/x86.c void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
vcpu             6385 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             6388 arch/x86/kvm/x86.c 	init_emulate_ctxt(vcpu);
vcpu             6396 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
vcpu             6399 arch/x86/kvm/x86.c 		kvm_rip_write(vcpu, ctxt->eip);
vcpu             6400 arch/x86/kvm/x86.c 		kvm_set_rflags(vcpu, ctxt->eflags);
vcpu             6405 arch/x86/kvm/x86.c static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
vcpu             6407 arch/x86/kvm/x86.c 	++vcpu->stat.insn_emulation_fail;
vcpu             6408 arch/x86/kvm/x86.c 	trace_kvm_emulate_insn_failed(vcpu);
vcpu             6411 arch/x86/kvm/x86.c 		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
vcpu             6416 arch/x86/kvm/x86.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             6417 arch/x86/kvm/x86.c 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu             6418 arch/x86/kvm/x86.c 		vcpu->run->internal.ndata = 0;
vcpu             6422 arch/x86/kvm/x86.c 	kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             6424 arch/x86/kvm/x86.c 	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
vcpu             6425 arch/x86/kvm/x86.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             6426 arch/x86/kvm/x86.c 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu             6427 arch/x86/kvm/x86.c 		vcpu->run->internal.ndata = 0;
vcpu             6434 arch/x86/kvm/x86.c static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu             6444 arch/x86/kvm/x86.c 	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
vcpu             6447 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map) {
vcpu             6452 arch/x86/kvm/x86.c 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
vcpu             6468 arch/x86/kvm/x86.c 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
vcpu             6480 arch/x86/kvm/x86.c 	if (vcpu->arch.mmu->direct_map) {
vcpu             6483 arch/x86/kvm/x86.c 		spin_lock(&vcpu->kvm->mmu_lock);
vcpu             6484 arch/x86/kvm/x86.c 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
vcpu             6485 arch/x86/kvm/x86.c 		spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             6488 arch/x86/kvm/x86.c 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
vcpu             6498 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
vcpu             6511 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             6514 arch/x86/kvm/x86.c 	last_retry_eip = vcpu->arch.last_retry_eip;
vcpu             6515 arch/x86/kvm/x86.c 	last_retry_addr = vcpu->arch.last_retry_addr;
vcpu             6530 arch/x86/kvm/x86.c 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
vcpu             6535 arch/x86/kvm/x86.c 	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
vcpu             6544 arch/x86/kvm/x86.c 	vcpu->arch.last_retry_eip = ctxt->eip;
vcpu             6545 arch/x86/kvm/x86.c 	vcpu->arch.last_retry_addr = cr2_or_gpa;
vcpu             6547 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map)
vcpu             6548 arch/x86/kvm/x86.c 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
vcpu             6550 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
vcpu             6555 arch/x86/kvm/x86.c static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
vcpu             6556 arch/x86/kvm/x86.c static int complete_emulated_pio(struct kvm_vcpu *vcpu);
vcpu             6558 arch/x86/kvm/x86.c static void kvm_smm_changed(struct kvm_vcpu *vcpu)
vcpu             6560 arch/x86/kvm/x86.c 	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
vcpu             6562 arch/x86/kvm/x86.c 		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
vcpu             6565 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             6568 arch/x86/kvm/x86.c 	kvm_mmu_reset_context(vcpu);
vcpu             6586 arch/x86/kvm/x86.c static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
vcpu             6588 arch/x86/kvm/x86.c 	struct kvm_run *kvm_run = vcpu->run;
vcpu             6590 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
vcpu             6592 arch/x86/kvm/x86.c 		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
vcpu             6597 arch/x86/kvm/x86.c 	kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
vcpu             6601 arch/x86/kvm/x86.c int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
vcpu             6603 arch/x86/kvm/x86.c 	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
vcpu             6606 arch/x86/kvm/x86.c 	r = kvm_x86_ops->skip_emulated_instruction(vcpu);
vcpu             6619 arch/x86/kvm/x86.c 		r = kvm_vcpu_do_singlestep(vcpu);
vcpu             6624 arch/x86/kvm/x86.c static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
vcpu             6626 arch/x86/kvm/x86.c 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
vcpu             6627 arch/x86/kvm/x86.c 	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
vcpu             6628 arch/x86/kvm/x86.c 		struct kvm_run *kvm_run = vcpu->run;
vcpu             6629 arch/x86/kvm/x86.c 		unsigned long eip = kvm_get_linear_rip(vcpu);
vcpu             6631 arch/x86/kvm/x86.c 					   vcpu->arch.guest_debug_dr7,
vcpu             6632 arch/x86/kvm/x86.c 					   vcpu->arch.eff_db);
vcpu             6644 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
vcpu             6645 arch/x86/kvm/x86.c 	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
vcpu             6646 arch/x86/kvm/x86.c 		unsigned long eip = kvm_get_linear_rip(vcpu);
vcpu             6648 arch/x86/kvm/x86.c 					   vcpu->arch.dr7,
vcpu             6649 arch/x86/kvm/x86.c 					   vcpu->arch.db);
vcpu             6652 arch/x86/kvm/x86.c 			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
vcpu             6653 arch/x86/kvm/x86.c 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
vcpu             6654 arch/x86/kvm/x86.c 			kvm_queue_exception(vcpu, DB_VECTOR);
vcpu             6694 arch/x86/kvm/x86.c int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu             6698 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             6700 arch/x86/kvm/x86.c 	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
vcpu             6702 arch/x86/kvm/x86.c 	vcpu->arch.l1tf_flush_l1d = true;
vcpu             6708 arch/x86/kvm/x86.c 	vcpu->arch.write_fault_to_shadow_pgtable = false;
vcpu             6709 arch/x86/kvm/x86.c 	kvm_clear_exception_queue(vcpu);
vcpu             6712 arch/x86/kvm/x86.c 		init_emulate_ctxt(vcpu);
vcpu             6721 arch/x86/kvm/x86.c 		    kvm_vcpu_check_breakpoint(vcpu, &r))
vcpu             6733 arch/x86/kvm/x86.c 		trace_kvm_emulate_insn_start(vcpu);
vcpu             6734 arch/x86/kvm/x86.c 		++vcpu->stat.insn_emulation;
vcpu             6738 arch/x86/kvm/x86.c 				kvm_queue_exception(vcpu, UD_VECTOR);
vcpu             6741 arch/x86/kvm/x86.c 			if (reexecute_instruction(vcpu, cr2_or_gpa,
vcpu             6752 arch/x86/kvm/x86.c 				inject_emulated_exception(vcpu);
vcpu             6755 arch/x86/kvm/x86.c 			return handle_emulation_failure(vcpu, emulation_type);
vcpu             6761 arch/x86/kvm/x86.c 		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
vcpu             6771 arch/x86/kvm/x86.c 		kvm_rip_write(vcpu, ctxt->_eip);
vcpu             6773 arch/x86/kvm/x86.c 			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
vcpu             6782 arch/x86/kvm/x86.c 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu             6783 arch/x86/kvm/x86.c 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
vcpu             6797 arch/x86/kvm/x86.c 		if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
vcpu             6801 arch/x86/kvm/x86.c 		return handle_emulation_failure(vcpu, emulation_type);
vcpu             6806 arch/x86/kvm/x86.c 		if (inject_emulated_exception(vcpu))
vcpu             6808 arch/x86/kvm/x86.c 	} else if (vcpu->arch.pio.count) {
vcpu             6809 arch/x86/kvm/x86.c 		if (!vcpu->arch.pio.in) {
vcpu             6811 arch/x86/kvm/x86.c 			vcpu->arch.pio.count = 0;
vcpu             6814 arch/x86/kvm/x86.c 			vcpu->arch.complete_userspace_io = complete_emulated_pio;
vcpu             6817 arch/x86/kvm/x86.c 	} else if (vcpu->mmio_needed) {
vcpu             6818 arch/x86/kvm/x86.c 		++vcpu->stat.mmio_exits;
vcpu             6820 arch/x86/kvm/x86.c 		if (!vcpu->mmio_is_write)
vcpu             6823 arch/x86/kvm/x86.c 		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
vcpu             6830 arch/x86/kvm/x86.c 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
vcpu             6831 arch/x86/kvm/x86.c 		toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu             6832 arch/x86/kvm/x86.c 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
vcpu             6835 arch/x86/kvm/x86.c 			kvm_rip_write(vcpu, ctxt->eip);
vcpu             6836 arch/x86/kvm/x86.c 			if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
vcpu             6837 arch/x86/kvm/x86.c 				r = kvm_vcpu_do_singlestep(vcpu);
vcpu             6838 arch/x86/kvm/x86.c 			__kvm_set_rflags(vcpu, ctxt->eflags);
vcpu             6848 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             6850 arch/x86/kvm/x86.c 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
vcpu             6855 arch/x86/kvm/x86.c int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
vcpu             6857 arch/x86/kvm/x86.c 	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
vcpu             6861 arch/x86/kvm/x86.c int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
vcpu             6864 arch/x86/kvm/x86.c 	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
vcpu             6868 arch/x86/kvm/x86.c static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
vcpu             6870 arch/x86/kvm/x86.c 	vcpu->arch.pio.count = 0;
vcpu             6874 arch/x86/kvm/x86.c static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
vcpu             6876 arch/x86/kvm/x86.c 	vcpu->arch.pio.count = 0;
vcpu             6878 arch/x86/kvm/x86.c 	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
vcpu             6881 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             6884 arch/x86/kvm/x86.c static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
vcpu             6887 arch/x86/kvm/x86.c 	unsigned long val = kvm_rax_read(vcpu);
vcpu             6888 arch/x86/kvm/x86.c 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
vcpu             6898 arch/x86/kvm/x86.c 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
vcpu             6899 arch/x86/kvm/x86.c 		vcpu->arch.complete_userspace_io =
vcpu             6901 arch/x86/kvm/x86.c 		kvm_skip_emulated_instruction(vcpu);
vcpu             6903 arch/x86/kvm/x86.c 		vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu             6904 arch/x86/kvm/x86.c 		vcpu->arch.complete_userspace_io = complete_fast_pio_out;
vcpu             6909 arch/x86/kvm/x86.c static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
vcpu             6914 arch/x86/kvm/x86.c 	BUG_ON(vcpu->arch.pio.count != 1);
vcpu             6916 arch/x86/kvm/x86.c 	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
vcpu             6917 arch/x86/kvm/x86.c 		vcpu->arch.pio.count = 0;
vcpu             6922 arch/x86/kvm/x86.c 	val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
vcpu             6928 arch/x86/kvm/x86.c 	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
vcpu             6929 arch/x86/kvm/x86.c 				 vcpu->arch.pio.port, &val, 1);
vcpu             6930 arch/x86/kvm/x86.c 	kvm_rax_write(vcpu, val);
vcpu             6932 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             6935 arch/x86/kvm/x86.c static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
vcpu             6942 arch/x86/kvm/x86.c 	val = (size < 4) ? kvm_rax_read(vcpu) : 0;
vcpu             6944 arch/x86/kvm/x86.c 	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
vcpu             6947 arch/x86/kvm/x86.c 		kvm_rax_write(vcpu, val);
vcpu             6951 arch/x86/kvm/x86.c 	vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu             6952 arch/x86/kvm/x86.c 	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
vcpu             6957 arch/x86/kvm/x86.c int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
vcpu             6962 arch/x86/kvm/x86.c 		ret = kvm_fast_pio_in(vcpu, size, port);
vcpu             6964 arch/x86/kvm/x86.c 		ret = kvm_fast_pio_out(vcpu, size, port);
vcpu             6965 arch/x86/kvm/x86.c 	return ret && kvm_skip_emulated_instruction(vcpu);
vcpu             6993 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             7014 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
vcpu             7015 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             7017 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
vcpu             7018 arch/x86/kvm/x86.c 			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
vcpu             7029 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             7075 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             7076 arch/x86/kvm/x86.c 			if (vcpu->cpu != cpu)
vcpu             7078 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             7079 arch/x86/kvm/x86.c 			if (vcpu->cpu != raw_smp_processor_id())
vcpu             7183 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
vcpu             7185 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_PMI, vcpu);
vcpu             7187 arch/x86/kvm/x86.c 			(unsigned long *)&vcpu->arch.pmu.global_status);
vcpu             7202 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             7207 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             7208 arch/x86/kvm/x86.c 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
vcpu             7347 arch/x86/kvm/x86.c int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
vcpu             7349 arch/x86/kvm/x86.c 	++vcpu->stat.halt_exits;
vcpu             7350 arch/x86/kvm/x86.c 	if (lapic_in_kernel(vcpu)) {
vcpu             7351 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
vcpu             7354 arch/x86/kvm/x86.c 		vcpu->run->exit_reason = KVM_EXIT_HLT;
vcpu             7360 arch/x86/kvm/x86.c int kvm_emulate_halt(struct kvm_vcpu *vcpu)
vcpu             7362 arch/x86/kvm/x86.c 	int ret = kvm_skip_emulated_instruction(vcpu);
vcpu             7367 arch/x86/kvm/x86.c 	return kvm_vcpu_halt(vcpu) && ret;
vcpu             7372 arch/x86/kvm/x86.c static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
vcpu             7388 arch/x86/kvm/x86.c 	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
vcpu             7393 arch/x86/kvm/x86.c 	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
vcpu             7420 arch/x86/kvm/x86.c void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
vcpu             7422 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu)) {
vcpu             7423 arch/x86/kvm/x86.c 		WARN_ON_ONCE(vcpu->arch.apicv_active);
vcpu             7426 arch/x86/kvm/x86.c 	if (!vcpu->arch.apicv_active)
vcpu             7429 arch/x86/kvm/x86.c 	vcpu->arch.apicv_active = false;
vcpu             7430 arch/x86/kvm/x86.c 	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
vcpu             7442 arch/x86/kvm/x86.c 		target = map->phys_map[dest_id]->vcpu;
vcpu             7450 arch/x86/kvm/x86.c int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
vcpu             7455 arch/x86/kvm/x86.c 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
vcpu             7456 arch/x86/kvm/x86.c 		return kvm_hv_hypercall(vcpu);
vcpu             7458 arch/x86/kvm/x86.c 	nr = kvm_rax_read(vcpu);
vcpu             7459 arch/x86/kvm/x86.c 	a0 = kvm_rbx_read(vcpu);
vcpu             7460 arch/x86/kvm/x86.c 	a1 = kvm_rcx_read(vcpu);
vcpu             7461 arch/x86/kvm/x86.c 	a2 = kvm_rdx_read(vcpu);
vcpu             7462 arch/x86/kvm/x86.c 	a3 = kvm_rsi_read(vcpu);
vcpu             7466 arch/x86/kvm/x86.c 	op_64_bit = is_64_bit_mode(vcpu);
vcpu             7475 arch/x86/kvm/x86.c 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
vcpu             7485 arch/x86/kvm/x86.c 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
vcpu             7486 arch/x86/kvm/x86.c 		kvm_sched_yield(vcpu->kvm, a1);
vcpu             7491 arch/x86/kvm/x86.c 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
vcpu             7495 arch/x86/kvm/x86.c 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
vcpu             7498 arch/x86/kvm/x86.c 		kvm_sched_yield(vcpu->kvm, a0);
vcpu             7508 arch/x86/kvm/x86.c 	kvm_rax_write(vcpu, ret);
vcpu             7510 arch/x86/kvm/x86.c 	++vcpu->stat.hypercalls;
vcpu             7511 arch/x86/kvm/x86.c 	return kvm_skip_emulated_instruction(vcpu);
vcpu             7517 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
vcpu             7519 arch/x86/kvm/x86.c 	unsigned long rip = kvm_rip_read(vcpu);
vcpu             7521 arch/x86/kvm/x86.c 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
vcpu             7527 arch/x86/kvm/x86.c static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
vcpu             7529 arch/x86/kvm/x86.c 	return vcpu->run->request_interrupt_window &&
vcpu             7530 arch/x86/kvm/x86.c 		likely(!pic_in_kernel(vcpu->kvm));
vcpu             7533 arch/x86/kvm/x86.c static void post_kvm_run_save(struct kvm_vcpu *vcpu)
vcpu             7535 arch/x86/kvm/x86.c 	struct kvm_run *kvm_run = vcpu->run;
vcpu             7537 arch/x86/kvm/x86.c 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
vcpu             7538 arch/x86/kvm/x86.c 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
vcpu             7539 arch/x86/kvm/x86.c 	kvm_run->cr8 = kvm_get_cr8(vcpu);
vcpu             7540 arch/x86/kvm/x86.c 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
vcpu             7542 arch/x86/kvm/x86.c 		pic_in_kernel(vcpu->kvm) ||
vcpu             7543 arch/x86/kvm/x86.c 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
vcpu             7546 arch/x86/kvm/x86.c static void update_cr8_intercept(struct kvm_vcpu *vcpu)
vcpu             7553 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu))
vcpu             7556 arch/x86/kvm/x86.c 	if (vcpu->arch.apicv_active)
vcpu             7559 arch/x86/kvm/x86.c 	if (!vcpu->arch.apic->vapic_addr)
vcpu             7560 arch/x86/kvm/x86.c 		max_irr = kvm_lapic_find_highest_irr(vcpu);
vcpu             7567 arch/x86/kvm/x86.c 	tpr = kvm_lapic_get_cr8(vcpu);
vcpu             7569 arch/x86/kvm/x86.c 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
vcpu             7572 arch/x86/kvm/x86.c static int inject_pending_event(struct kvm_vcpu *vcpu)
vcpu             7578 arch/x86/kvm/x86.c 	if (vcpu->arch.exception.injected)
vcpu             7579 arch/x86/kvm/x86.c 		kvm_x86_ops->queue_exception(vcpu);
vcpu             7594 arch/x86/kvm/x86.c 	else if (!vcpu->arch.exception.pending) {
vcpu             7595 arch/x86/kvm/x86.c 		if (vcpu->arch.nmi_injected)
vcpu             7596 arch/x86/kvm/x86.c 			kvm_x86_ops->set_nmi(vcpu);
vcpu             7597 arch/x86/kvm/x86.c 		else if (vcpu->arch.interrupt.injected)
vcpu             7598 arch/x86/kvm/x86.c 			kvm_x86_ops->set_irq(vcpu);
vcpu             7607 arch/x86/kvm/x86.c 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
vcpu             7608 arch/x86/kvm/x86.c 		r = kvm_x86_ops->check_nested_events(vcpu);
vcpu             7614 arch/x86/kvm/x86.c 	if (vcpu->arch.exception.pending) {
vcpu             7615 arch/x86/kvm/x86.c 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
vcpu             7616 arch/x86/kvm/x86.c 					vcpu->arch.exception.has_error_code,
vcpu             7617 arch/x86/kvm/x86.c 					vcpu->arch.exception.error_code);
vcpu             7619 arch/x86/kvm/x86.c 		WARN_ON_ONCE(vcpu->arch.exception.injected);
vcpu             7620 arch/x86/kvm/x86.c 		vcpu->arch.exception.pending = false;
vcpu             7621 arch/x86/kvm/x86.c 		vcpu->arch.exception.injected = true;
vcpu             7623 arch/x86/kvm/x86.c 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
vcpu             7624 arch/x86/kvm/x86.c 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
vcpu             7627 arch/x86/kvm/x86.c 		if (vcpu->arch.exception.nr == DB_VECTOR) {
vcpu             7638 arch/x86/kvm/x86.c 			kvm_deliver_exception_payload(vcpu);
vcpu             7639 arch/x86/kvm/x86.c 			if (vcpu->arch.dr7 & DR7_GD) {
vcpu             7640 arch/x86/kvm/x86.c 				vcpu->arch.dr7 &= ~DR7_GD;
vcpu             7641 arch/x86/kvm/x86.c 				kvm_update_dr7(vcpu);
vcpu             7645 arch/x86/kvm/x86.c 		kvm_x86_ops->queue_exception(vcpu);
vcpu             7649 arch/x86/kvm/x86.c 	if (kvm_event_needs_reinjection(vcpu))
vcpu             7652 arch/x86/kvm/x86.c 	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
vcpu             7653 arch/x86/kvm/x86.c 	    kvm_x86_ops->smi_allowed(vcpu)) {
vcpu             7654 arch/x86/kvm/x86.c 		vcpu->arch.smi_pending = false;
vcpu             7655 arch/x86/kvm/x86.c 		++vcpu->arch.smi_count;
vcpu             7656 arch/x86/kvm/x86.c 		enter_smm(vcpu);
vcpu             7657 arch/x86/kvm/x86.c 	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
vcpu             7658 arch/x86/kvm/x86.c 		--vcpu->arch.nmi_pending;
vcpu             7659 arch/x86/kvm/x86.c 		vcpu->arch.nmi_injected = true;
vcpu             7660 arch/x86/kvm/x86.c 		kvm_x86_ops->set_nmi(vcpu);
vcpu             7661 arch/x86/kvm/x86.c 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
vcpu             7669 arch/x86/kvm/x86.c 		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
vcpu             7670 arch/x86/kvm/x86.c 			r = kvm_x86_ops->check_nested_events(vcpu);
vcpu             7674 arch/x86/kvm/x86.c 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
vcpu             7675 arch/x86/kvm/x86.c 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
vcpu             7677 arch/x86/kvm/x86.c 			kvm_x86_ops->set_irq(vcpu);
vcpu             7684 arch/x86/kvm/x86.c static void process_nmi(struct kvm_vcpu *vcpu)
vcpu             7693 arch/x86/kvm/x86.c 	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
vcpu             7696 arch/x86/kvm/x86.c 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
vcpu             7697 arch/x86/kvm/x86.c 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
vcpu             7698 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             7715 arch/x86/kvm/x86.c static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
vcpu             7720 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, n);
vcpu             7734 arch/x86/kvm/x86.c static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
vcpu             7740 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, n);
vcpu             7751 arch/x86/kvm/x86.c static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
vcpu             7758 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
vcpu             7759 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
vcpu             7760 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
vcpu             7761 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
vcpu             7764 arch/x86/kvm/x86.c 		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
vcpu             7766 arch/x86/kvm/x86.c 	kvm_get_dr(vcpu, 6, &val);
vcpu             7768 arch/x86/kvm/x86.c 	kvm_get_dr(vcpu, 7, &val);
vcpu             7771 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
vcpu             7777 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
vcpu             7783 arch/x86/kvm/x86.c 	kvm_x86_ops->get_gdt(vcpu, &dt);
vcpu             7787 arch/x86/kvm/x86.c 	kvm_x86_ops->get_idt(vcpu, &dt);
vcpu             7792 arch/x86/kvm/x86.c 		enter_smm_save_seg_32(vcpu, buf, i);
vcpu             7794 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
vcpu             7798 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
vcpu             7802 arch/x86/kvm/x86.c static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
vcpu             7810 arch/x86/kvm/x86.c 		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
vcpu             7812 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
vcpu             7813 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
vcpu             7815 arch/x86/kvm/x86.c 	kvm_get_dr(vcpu, 6, &val);
vcpu             7817 arch/x86/kvm/x86.c 	kvm_get_dr(vcpu, 7, &val);
vcpu             7820 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
vcpu             7821 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
vcpu             7822 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
vcpu             7824 arch/x86/kvm/x86.c 	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
vcpu             7829 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
vcpu             7831 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
vcpu             7837 arch/x86/kvm/x86.c 	kvm_x86_ops->get_idt(vcpu, &dt);
vcpu             7841 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
vcpu             7847 arch/x86/kvm/x86.c 	kvm_x86_ops->get_gdt(vcpu, &dt);
vcpu             7852 arch/x86/kvm/x86.c 		enter_smm_save_seg_64(vcpu, buf, i);
vcpu             7856 arch/x86/kvm/x86.c static void enter_smm(struct kvm_vcpu *vcpu)
vcpu             7863 arch/x86/kvm/x86.c 	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
vcpu             7866 arch/x86/kvm/x86.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
vcpu             7867 arch/x86/kvm/x86.c 		enter_smm_save_state_64(vcpu, buf);
vcpu             7870 arch/x86/kvm/x86.c 		enter_smm_save_state_32(vcpu, buf);
vcpu             7877 arch/x86/kvm/x86.c 	kvm_x86_ops->pre_enter_smm(vcpu, buf);
vcpu             7879 arch/x86/kvm/x86.c 	vcpu->arch.hflags |= HF_SMM_MASK;
vcpu             7880 arch/x86/kvm/x86.c 	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
vcpu             7882 arch/x86/kvm/x86.c 	if (kvm_x86_ops->get_nmi_mask(vcpu))
vcpu             7883 arch/x86/kvm/x86.c 		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
vcpu             7885 arch/x86/kvm/x86.c 		kvm_x86_ops->set_nmi_mask(vcpu, true);
vcpu             7887 arch/x86/kvm/x86.c 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
vcpu             7888 arch/x86/kvm/x86.c 	kvm_rip_write(vcpu, 0x8000);
vcpu             7890 arch/x86/kvm/x86.c 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
vcpu             7891 arch/x86/kvm/x86.c 	kvm_x86_ops->set_cr0(vcpu, cr0);
vcpu             7892 arch/x86/kvm/x86.c 	vcpu->arch.cr0 = cr0;
vcpu             7894 arch/x86/kvm/x86.c 	kvm_x86_ops->set_cr4(vcpu, 0);
vcpu             7898 arch/x86/kvm/x86.c 	kvm_x86_ops->set_idt(vcpu, &dt);
vcpu             7900 arch/x86/kvm/x86.c 	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
vcpu             7902 arch/x86/kvm/x86.c 	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
vcpu             7903 arch/x86/kvm/x86.c 	cs.base = vcpu->arch.smbase;
vcpu             7920 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             7921 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
vcpu             7922 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
vcpu             7923 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
vcpu             7924 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
vcpu             7925 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
vcpu             7928 arch/x86/kvm/x86.c 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
vcpu             7929 arch/x86/kvm/x86.c 		kvm_x86_ops->set_efer(vcpu, 0);
vcpu             7932 arch/x86/kvm/x86.c 	kvm_update_cpuid(vcpu);
vcpu             7933 arch/x86/kvm/x86.c 	kvm_mmu_reset_context(vcpu);
vcpu             7936 arch/x86/kvm/x86.c static void process_smi(struct kvm_vcpu *vcpu)
vcpu             7938 arch/x86/kvm/x86.c 	vcpu->arch.smi_pending = true;
vcpu             7939 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             7947 arch/x86/kvm/x86.c static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
vcpu             7949 arch/x86/kvm/x86.c 	if (!kvm_apic_present(vcpu))
vcpu             7952 arch/x86/kvm/x86.c 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
vcpu             7954 arch/x86/kvm/x86.c 	if (irqchip_split(vcpu->kvm))
vcpu             7955 arch/x86/kvm/x86.c 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
vcpu             7957 arch/x86/kvm/x86.c 		if (vcpu->arch.apicv_active)
vcpu             7958 arch/x86/kvm/x86.c 			kvm_x86_ops->sync_pir_to_irr(vcpu);
vcpu             7959 arch/x86/kvm/x86.c 		if (ioapic_in_kernel(vcpu->kvm))
vcpu             7960 arch/x86/kvm/x86.c 			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
vcpu             7963 arch/x86/kvm/x86.c 	if (is_guest_mode(vcpu))
vcpu             7964 arch/x86/kvm/x86.c 		vcpu->arch.load_eoi_exitmap_pending = true;
vcpu             7966 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
vcpu             7969 arch/x86/kvm/x86.c static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
vcpu             7973 arch/x86/kvm/x86.c 	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
vcpu             7976 arch/x86/kvm/x86.c 	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
vcpu             7977 arch/x86/kvm/x86.c 		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
vcpu             7978 arch/x86/kvm/x86.c 	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
vcpu             7995 arch/x86/kvm/x86.c void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
vcpu             7999 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu))
vcpu             8005 arch/x86/kvm/x86.c 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
vcpu             8008 arch/x86/kvm/x86.c 	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
vcpu             8018 arch/x86/kvm/x86.c void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
vcpu             8020 arch/x86/kvm/x86.c 	smp_send_reschedule(vcpu->cpu);
vcpu             8029 arch/x86/kvm/x86.c static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu             8033 arch/x86/kvm/x86.c 		dm_request_for_irq_injection(vcpu) &&
vcpu             8034 arch/x86/kvm/x86.c 		kvm_cpu_accept_dm_intr(vcpu);
vcpu             8038 arch/x86/kvm/x86.c 	if (kvm_request_pending(vcpu)) {
vcpu             8039 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
vcpu             8040 arch/x86/kvm/x86.c 			if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
vcpu             8045 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
vcpu             8046 arch/x86/kvm/x86.c 			kvm_mmu_unload(vcpu);
vcpu             8047 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
vcpu             8048 arch/x86/kvm/x86.c 			__kvm_migrate_timers(vcpu);
vcpu             8049 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
vcpu             8050 arch/x86/kvm/x86.c 			kvm_gen_update_masterclock(vcpu->kvm);
vcpu             8051 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
vcpu             8052 arch/x86/kvm/x86.c 			kvm_gen_kvmclock_update(vcpu);
vcpu             8053 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
vcpu             8054 arch/x86/kvm/x86.c 			r = kvm_guest_time_update(vcpu);
vcpu             8058 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
vcpu             8059 arch/x86/kvm/x86.c 			kvm_mmu_sync_roots(vcpu);
vcpu             8060 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
vcpu             8061 arch/x86/kvm/x86.c 			kvm_mmu_load_cr3(vcpu);
vcpu             8062 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
vcpu             8063 arch/x86/kvm/x86.c 			kvm_vcpu_flush_tlb(vcpu, true);
vcpu             8064 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu             8065 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
vcpu             8069 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu             8070 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
vcpu             8071 arch/x86/kvm/x86.c 			vcpu->mmio_needed = 0;
vcpu             8075 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
vcpu             8077 arch/x86/kvm/x86.c 			vcpu->arch.apf.halted = true;
vcpu             8081 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
vcpu             8082 arch/x86/kvm/x86.c 			record_steal_time(vcpu);
vcpu             8083 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_SMI, vcpu))
vcpu             8084 arch/x86/kvm/x86.c 			process_smi(vcpu);
vcpu             8085 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
vcpu             8086 arch/x86/kvm/x86.c 			process_nmi(vcpu);
vcpu             8087 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
vcpu             8088 arch/x86/kvm/x86.c 			kvm_pmu_handle_event(vcpu);
vcpu             8089 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
vcpu             8090 arch/x86/kvm/x86.c 			kvm_pmu_deliver_pmi(vcpu);
vcpu             8091 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
vcpu             8092 arch/x86/kvm/x86.c 			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
vcpu             8093 arch/x86/kvm/x86.c 			if (test_bit(vcpu->arch.pending_ioapic_eoi,
vcpu             8094 arch/x86/kvm/x86.c 				     vcpu->arch.ioapic_handled_vectors)) {
vcpu             8095 arch/x86/kvm/x86.c 				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
vcpu             8096 arch/x86/kvm/x86.c 				vcpu->run->eoi.vector =
vcpu             8097 arch/x86/kvm/x86.c 						vcpu->arch.pending_ioapic_eoi;
vcpu             8102 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
vcpu             8103 arch/x86/kvm/x86.c 			vcpu_scan_ioapic(vcpu);
vcpu             8104 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
vcpu             8105 arch/x86/kvm/x86.c 			vcpu_load_eoi_exitmap(vcpu);
vcpu             8106 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
vcpu             8107 arch/x86/kvm/x86.c 			kvm_vcpu_reload_apic_access_page(vcpu);
vcpu             8108 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
vcpu             8109 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu             8110 arch/x86/kvm/x86.c 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
vcpu             8114 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
vcpu             8115 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu             8116 arch/x86/kvm/x86.c 			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
vcpu             8120 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
vcpu             8121 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu             8122 arch/x86/kvm/x86.c 			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
vcpu             8132 arch/x86/kvm/x86.c 		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
vcpu             8133 arch/x86/kvm/x86.c 			kvm_hv_process_stimers(vcpu);
vcpu             8136 arch/x86/kvm/x86.c 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
vcpu             8137 arch/x86/kvm/x86.c 		++vcpu->stat.req_event;
vcpu             8138 arch/x86/kvm/x86.c 		kvm_apic_accept_events(vcpu);
vcpu             8139 arch/x86/kvm/x86.c 		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
vcpu             8144 arch/x86/kvm/x86.c 		if (inject_pending_event(vcpu) != 0)
vcpu             8161 arch/x86/kvm/x86.c 			if (vcpu->arch.smi_pending && !is_smm(vcpu))
vcpu             8162 arch/x86/kvm/x86.c 				if (!kvm_x86_ops->enable_smi_window(vcpu))
vcpu             8164 arch/x86/kvm/x86.c 			if (vcpu->arch.nmi_pending)
vcpu             8165 arch/x86/kvm/x86.c 				kvm_x86_ops->enable_nmi_window(vcpu);
vcpu             8166 arch/x86/kvm/x86.c 			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
vcpu             8167 arch/x86/kvm/x86.c 				kvm_x86_ops->enable_irq_window(vcpu);
vcpu             8168 arch/x86/kvm/x86.c 			WARN_ON(vcpu->arch.exception.pending);
vcpu             8171 arch/x86/kvm/x86.c 		if (kvm_lapic_enabled(vcpu)) {
vcpu             8172 arch/x86/kvm/x86.c 			update_cr8_intercept(vcpu);
vcpu             8173 arch/x86/kvm/x86.c 			kvm_lapic_sync_to_vapic(vcpu);
vcpu             8177 arch/x86/kvm/x86.c 	r = kvm_mmu_reload(vcpu);
vcpu             8184 arch/x86/kvm/x86.c 	kvm_x86_ops->prepare_guest_switch(vcpu);
vcpu             8192 arch/x86/kvm/x86.c 	vcpu->mode = IN_GUEST_MODE;
vcpu             8194 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             8214 arch/x86/kvm/x86.c 	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
vcpu             8215 arch/x86/kvm/x86.c 		kvm_x86_ops->sync_pir_to_irr(vcpu);
vcpu             8217 arch/x86/kvm/x86.c 	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
vcpu             8219 arch/x86/kvm/x86.c 		vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu             8223 arch/x86/kvm/x86.c 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             8229 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             8230 arch/x86/kvm/x86.c 		kvm_x86_ops->request_immediate_exit(vcpu);
vcpu             8233 arch/x86/kvm/x86.c 	trace_kvm_entry(vcpu->vcpu_id);
vcpu             8237 arch/x86/kvm/x86.c 	vcpu->arch.host_pkru = read_pkru();
vcpu             8243 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.switch_db_regs)) {
vcpu             8245 arch/x86/kvm/x86.c 		set_debugreg(vcpu->arch.eff_db[0], 0);
vcpu             8246 arch/x86/kvm/x86.c 		set_debugreg(vcpu->arch.eff_db[1], 1);
vcpu             8247 arch/x86/kvm/x86.c 		set_debugreg(vcpu->arch.eff_db[2], 2);
vcpu             8248 arch/x86/kvm/x86.c 		set_debugreg(vcpu->arch.eff_db[3], 3);
vcpu             8249 arch/x86/kvm/x86.c 		set_debugreg(vcpu->arch.dr6, 6);
vcpu             8250 arch/x86/kvm/x86.c 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
vcpu             8253 arch/x86/kvm/x86.c 	kvm_x86_ops->run(vcpu);
vcpu             8261 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
vcpu             8262 arch/x86/kvm/x86.c 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
vcpu             8263 arch/x86/kvm/x86.c 		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
vcpu             8264 arch/x86/kvm/x86.c 		kvm_update_dr0123(vcpu);
vcpu             8265 arch/x86/kvm/x86.c 		kvm_update_dr6(vcpu);
vcpu             8266 arch/x86/kvm/x86.c 		kvm_update_dr7(vcpu);
vcpu             8267 arch/x86/kvm/x86.c 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
vcpu             8280 arch/x86/kvm/x86.c 	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
vcpu             8282 arch/x86/kvm/x86.c 	vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu             8285 arch/x86/kvm/x86.c 	kvm_x86_ops->handle_exit_irqoff(vcpu);
vcpu             8294 arch/x86/kvm/x86.c 	kvm_before_interrupt(vcpu);
vcpu             8296 arch/x86/kvm/x86.c 	++vcpu->stat.exits;
vcpu             8298 arch/x86/kvm/x86.c 	kvm_after_interrupt(vcpu);
vcpu             8301 arch/x86/kvm/x86.c 	if (lapic_in_kernel(vcpu)) {
vcpu             8302 arch/x86/kvm/x86.c 		s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
vcpu             8304 arch/x86/kvm/x86.c 			trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta);
vcpu             8305 arch/x86/kvm/x86.c 			vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN;
vcpu             8312 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             8318 arch/x86/kvm/x86.c 		unsigned long rip = kvm_rip_read(vcpu);
vcpu             8322 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.tsc_always_catchup))
vcpu             8323 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             8325 arch/x86/kvm/x86.c 	if (vcpu->arch.apic_attention)
vcpu             8326 arch/x86/kvm/x86.c 		kvm_lapic_sync_from_vapic(vcpu);
vcpu             8328 arch/x86/kvm/x86.c 	vcpu->arch.gpa_available = false;
vcpu             8329 arch/x86/kvm/x86.c 	r = kvm_x86_ops->handle_exit(vcpu);
vcpu             8333 arch/x86/kvm/x86.c 	kvm_x86_ops->cancel_injection(vcpu);
vcpu             8334 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.apic_attention))
vcpu             8335 arch/x86/kvm/x86.c 		kvm_lapic_sync_from_vapic(vcpu);
vcpu             8340 arch/x86/kvm/x86.c static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
vcpu             8342 arch/x86/kvm/x86.c 	if (!kvm_arch_vcpu_runnable(vcpu) &&
vcpu             8343 arch/x86/kvm/x86.c 	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
vcpu             8344 arch/x86/kvm/x86.c 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vcpu             8345 arch/x86/kvm/x86.c 		kvm_vcpu_block(vcpu);
vcpu             8346 arch/x86/kvm/x86.c 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vcpu             8349 arch/x86/kvm/x86.c 			kvm_x86_ops->post_block(vcpu);
vcpu             8351 arch/x86/kvm/x86.c 		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
vcpu             8355 arch/x86/kvm/x86.c 	kvm_apic_accept_events(vcpu);
vcpu             8356 arch/x86/kvm/x86.c 	switch(vcpu->arch.mp_state) {
vcpu             8358 arch/x86/kvm/x86.c 		vcpu->arch.pv.pv_unhalted = false;
vcpu             8359 arch/x86/kvm/x86.c 		vcpu->arch.mp_state =
vcpu             8363 arch/x86/kvm/x86.c 		vcpu->arch.apf.halted = false;
vcpu             8374 arch/x86/kvm/x86.c static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
vcpu             8376 arch/x86/kvm/x86.c 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
vcpu             8377 arch/x86/kvm/x86.c 		kvm_x86_ops->check_nested_events(vcpu);
vcpu             8379 arch/x86/kvm/x86.c 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
vcpu             8380 arch/x86/kvm/x86.c 		!vcpu->arch.apf.halted);
vcpu             8383 arch/x86/kvm/x86.c static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu             8386 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
vcpu             8388 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vcpu             8389 arch/x86/kvm/x86.c 	vcpu->arch.l1tf_flush_l1d = true;
vcpu             8392 arch/x86/kvm/x86.c 		if (kvm_vcpu_running(vcpu)) {
vcpu             8393 arch/x86/kvm/x86.c 			r = vcpu_enter_guest(vcpu);
vcpu             8395 arch/x86/kvm/x86.c 			r = vcpu_block(kvm, vcpu);
vcpu             8401 arch/x86/kvm/x86.c 		kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
vcpu             8402 arch/x86/kvm/x86.c 		if (kvm_cpu_has_pending_timer(vcpu))
vcpu             8403 arch/x86/kvm/x86.c 			kvm_inject_pending_timer_irqs(vcpu);
vcpu             8405 arch/x86/kvm/x86.c 		if (dm_request_for_irq_injection(vcpu) &&
vcpu             8406 arch/x86/kvm/x86.c 			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
vcpu             8408 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
vcpu             8409 arch/x86/kvm/x86.c 			++vcpu->stat.request_irq_exits;
vcpu             8413 arch/x86/kvm/x86.c 		kvm_check_async_pf_completion(vcpu);
vcpu             8417 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu             8418 arch/x86/kvm/x86.c 			++vcpu->stat.signal_exits;
vcpu             8422 arch/x86/kvm/x86.c 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vcpu             8424 arch/x86/kvm/x86.c 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vcpu             8428 arch/x86/kvm/x86.c 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vcpu             8433 arch/x86/kvm/x86.c static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
vcpu             8437 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             8438 arch/x86/kvm/x86.c 	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
vcpu             8439 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vcpu             8443 arch/x86/kvm/x86.c static int complete_emulated_pio(struct kvm_vcpu *vcpu)
vcpu             8445 arch/x86/kvm/x86.c 	BUG_ON(!vcpu->arch.pio.count);
vcpu             8447 arch/x86/kvm/x86.c 	return complete_emulated_io(vcpu);
vcpu             8468 arch/x86/kvm/x86.c static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
vcpu             8470 arch/x86/kvm/x86.c 	struct kvm_run *run = vcpu->run;
vcpu             8474 arch/x86/kvm/x86.c 	BUG_ON(!vcpu->mmio_needed);
vcpu             8477 arch/x86/kvm/x86.c 	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
vcpu             8479 arch/x86/kvm/x86.c 	if (!vcpu->mmio_is_write)
vcpu             8485 arch/x86/kvm/x86.c 		vcpu->mmio_cur_fragment++;
vcpu             8493 arch/x86/kvm/x86.c 	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
vcpu             8494 arch/x86/kvm/x86.c 		vcpu->mmio_needed = 0;
vcpu             8497 arch/x86/kvm/x86.c 		if (vcpu->mmio_is_write)
vcpu             8499 arch/x86/kvm/x86.c 		vcpu->mmio_read_completed = 1;
vcpu             8500 arch/x86/kvm/x86.c 		return complete_emulated_io(vcpu);
vcpu             8505 arch/x86/kvm/x86.c 	if (vcpu->mmio_is_write)
vcpu             8508 arch/x86/kvm/x86.c 	run->mmio.is_write = vcpu->mmio_is_write;
vcpu             8509 arch/x86/kvm/x86.c 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
vcpu             8527 arch/x86/kvm/x86.c static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
vcpu             8531 arch/x86/kvm/x86.c 	kvm_save_current_fpu(vcpu->arch.user_fpu);
vcpu             8534 arch/x86/kvm/x86.c 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
vcpu             8544 arch/x86/kvm/x86.c static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
vcpu             8548 arch/x86/kvm/x86.c 	kvm_save_current_fpu(vcpu->arch.guest_fpu);
vcpu             8550 arch/x86/kvm/x86.c 	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
vcpu             8555 arch/x86/kvm/x86.c 	++vcpu->stat.fpu_reload;
vcpu             8559 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu             8563 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8564 arch/x86/kvm/x86.c 	kvm_sigset_activate(vcpu);
vcpu             8565 arch/x86/kvm/x86.c 	kvm_load_guest_fpu(vcpu);
vcpu             8567 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
vcpu             8572 arch/x86/kvm/x86.c 		kvm_vcpu_block(vcpu);
vcpu             8573 arch/x86/kvm/x86.c 		kvm_apic_accept_events(vcpu);
vcpu             8574 arch/x86/kvm/x86.c 		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu             8578 arch/x86/kvm/x86.c 			vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu             8579 arch/x86/kvm/x86.c 			++vcpu->stat.signal_exits;
vcpu             8584 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
vcpu             8589 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_dirty_regs) {
vcpu             8590 arch/x86/kvm/x86.c 		r = sync_regs(vcpu);
vcpu             8596 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu)) {
vcpu             8597 arch/x86/kvm/x86.c 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
vcpu             8603 arch/x86/kvm/x86.c 	if (unlikely(vcpu->arch.complete_userspace_io)) {
vcpu             8604 arch/x86/kvm/x86.c 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
vcpu             8605 arch/x86/kvm/x86.c 		vcpu->arch.complete_userspace_io = NULL;
vcpu             8606 arch/x86/kvm/x86.c 		r = cui(vcpu);
vcpu             8610 arch/x86/kvm/x86.c 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
vcpu             8615 arch/x86/kvm/x86.c 		r = vcpu_run(vcpu);
vcpu             8618 arch/x86/kvm/x86.c 	kvm_put_guest_fpu(vcpu);
vcpu             8619 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_valid_regs)
vcpu             8620 arch/x86/kvm/x86.c 		store_regs(vcpu);
vcpu             8621 arch/x86/kvm/x86.c 	post_kvm_run_save(vcpu);
vcpu             8622 arch/x86/kvm/x86.c 	kvm_sigset_deactivate(vcpu);
vcpu             8624 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8628 arch/x86/kvm/x86.c static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             8630 arch/x86/kvm/x86.c 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
vcpu             8638 arch/x86/kvm/x86.c 		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
vcpu             8639 arch/x86/kvm/x86.c 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
vcpu             8641 arch/x86/kvm/x86.c 	regs->rax = kvm_rax_read(vcpu);
vcpu             8642 arch/x86/kvm/x86.c 	regs->rbx = kvm_rbx_read(vcpu);
vcpu             8643 arch/x86/kvm/x86.c 	regs->rcx = kvm_rcx_read(vcpu);
vcpu             8644 arch/x86/kvm/x86.c 	regs->rdx = kvm_rdx_read(vcpu);
vcpu             8645 arch/x86/kvm/x86.c 	regs->rsi = kvm_rsi_read(vcpu);
vcpu             8646 arch/x86/kvm/x86.c 	regs->rdi = kvm_rdi_read(vcpu);
vcpu             8647 arch/x86/kvm/x86.c 	regs->rsp = kvm_rsp_read(vcpu);
vcpu             8648 arch/x86/kvm/x86.c 	regs->rbp = kvm_rbp_read(vcpu);
vcpu             8650 arch/x86/kvm/x86.c 	regs->r8 = kvm_r8_read(vcpu);
vcpu             8651 arch/x86/kvm/x86.c 	regs->r9 = kvm_r9_read(vcpu);
vcpu             8652 arch/x86/kvm/x86.c 	regs->r10 = kvm_r10_read(vcpu);
vcpu             8653 arch/x86/kvm/x86.c 	regs->r11 = kvm_r11_read(vcpu);
vcpu             8654 arch/x86/kvm/x86.c 	regs->r12 = kvm_r12_read(vcpu);
vcpu             8655 arch/x86/kvm/x86.c 	regs->r13 = kvm_r13_read(vcpu);
vcpu             8656 arch/x86/kvm/x86.c 	regs->r14 = kvm_r14_read(vcpu);
vcpu             8657 arch/x86/kvm/x86.c 	regs->r15 = kvm_r15_read(vcpu);
vcpu             8660 arch/x86/kvm/x86.c 	regs->rip = kvm_rip_read(vcpu);
vcpu             8661 arch/x86/kvm/x86.c 	regs->rflags = kvm_get_rflags(vcpu);
vcpu             8664 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             8666 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8667 arch/x86/kvm/x86.c 	__get_regs(vcpu, regs);
vcpu             8668 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8672 arch/x86/kvm/x86.c static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             8674 arch/x86/kvm/x86.c 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu             8675 arch/x86/kvm/x86.c 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
vcpu             8677 arch/x86/kvm/x86.c 	kvm_rax_write(vcpu, regs->rax);
vcpu             8678 arch/x86/kvm/x86.c 	kvm_rbx_write(vcpu, regs->rbx);
vcpu             8679 arch/x86/kvm/x86.c 	kvm_rcx_write(vcpu, regs->rcx);
vcpu             8680 arch/x86/kvm/x86.c 	kvm_rdx_write(vcpu, regs->rdx);
vcpu             8681 arch/x86/kvm/x86.c 	kvm_rsi_write(vcpu, regs->rsi);
vcpu             8682 arch/x86/kvm/x86.c 	kvm_rdi_write(vcpu, regs->rdi);
vcpu             8683 arch/x86/kvm/x86.c 	kvm_rsp_write(vcpu, regs->rsp);
vcpu             8684 arch/x86/kvm/x86.c 	kvm_rbp_write(vcpu, regs->rbp);
vcpu             8686 arch/x86/kvm/x86.c 	kvm_r8_write(vcpu, regs->r8);
vcpu             8687 arch/x86/kvm/x86.c 	kvm_r9_write(vcpu, regs->r9);
vcpu             8688 arch/x86/kvm/x86.c 	kvm_r10_write(vcpu, regs->r10);
vcpu             8689 arch/x86/kvm/x86.c 	kvm_r11_write(vcpu, regs->r11);
vcpu             8690 arch/x86/kvm/x86.c 	kvm_r12_write(vcpu, regs->r12);
vcpu             8691 arch/x86/kvm/x86.c 	kvm_r13_write(vcpu, regs->r13);
vcpu             8692 arch/x86/kvm/x86.c 	kvm_r14_write(vcpu, regs->r14);
vcpu             8693 arch/x86/kvm/x86.c 	kvm_r15_write(vcpu, regs->r15);
vcpu             8696 arch/x86/kvm/x86.c 	kvm_rip_write(vcpu, regs->rip);
vcpu             8697 arch/x86/kvm/x86.c 	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
vcpu             8699 arch/x86/kvm/x86.c 	vcpu->arch.exception.pending = false;
vcpu             8701 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             8704 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu             8706 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8707 arch/x86/kvm/x86.c 	__set_regs(vcpu, regs);
vcpu             8708 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8712 arch/x86/kvm/x86.c void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
vcpu             8716 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             8722 arch/x86/kvm/x86.c static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu             8726 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
vcpu             8727 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
vcpu             8728 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
vcpu             8729 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
vcpu             8730 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
vcpu             8731 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
vcpu             8733 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
vcpu             8734 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
vcpu             8736 arch/x86/kvm/x86.c 	kvm_x86_ops->get_idt(vcpu, &dt);
vcpu             8739 arch/x86/kvm/x86.c 	kvm_x86_ops->get_gdt(vcpu, &dt);
vcpu             8743 arch/x86/kvm/x86.c 	sregs->cr0 = kvm_read_cr0(vcpu);
vcpu             8744 arch/x86/kvm/x86.c 	sregs->cr2 = vcpu->arch.cr2;
vcpu             8745 arch/x86/kvm/x86.c 	sregs->cr3 = kvm_read_cr3(vcpu);
vcpu             8746 arch/x86/kvm/x86.c 	sregs->cr4 = kvm_read_cr4(vcpu);
vcpu             8747 arch/x86/kvm/x86.c 	sregs->cr8 = kvm_get_cr8(vcpu);
vcpu             8748 arch/x86/kvm/x86.c 	sregs->efer = vcpu->arch.efer;
vcpu             8749 arch/x86/kvm/x86.c 	sregs->apic_base = kvm_get_apic_base(vcpu);
vcpu             8753 arch/x86/kvm/x86.c 	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
vcpu             8754 arch/x86/kvm/x86.c 		set_bit(vcpu->arch.interrupt.nr,
vcpu             8758 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu             8761 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8762 arch/x86/kvm/x86.c 	__get_sregs(vcpu, sregs);
vcpu             8763 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8767 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu             8770 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8772 arch/x86/kvm/x86.c 		kvm_load_guest_fpu(vcpu);
vcpu             8774 arch/x86/kvm/x86.c 	kvm_apic_accept_events(vcpu);
vcpu             8775 arch/x86/kvm/x86.c 	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
vcpu             8776 arch/x86/kvm/x86.c 					vcpu->arch.pv.pv_unhalted)
vcpu             8779 arch/x86/kvm/x86.c 		mp_state->mp_state = vcpu->arch.mp_state;
vcpu             8782 arch/x86/kvm/x86.c 		kvm_put_guest_fpu(vcpu);
vcpu             8783 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8787 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu             8792 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8794 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu) &&
vcpu             8799 arch/x86/kvm/x86.c 	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
vcpu             8805 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
vcpu             8806 arch/x86/kvm/x86.c 		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
vcpu             8808 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = mp_state->mp_state;
vcpu             8809 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             8813 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8817 arch/x86/kvm/x86.c int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
vcpu             8820 arch/x86/kvm/x86.c 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
vcpu             8823 arch/x86/kvm/x86.c 	init_emulate_ctxt(vcpu);
vcpu             8828 arch/x86/kvm/x86.c 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu             8829 arch/x86/kvm/x86.c 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu             8830 arch/x86/kvm/x86.c 		vcpu->run->internal.ndata = 0;
vcpu             8834 arch/x86/kvm/x86.c 	kvm_rip_write(vcpu, ctxt->eip);
vcpu             8835 arch/x86/kvm/x86.c 	kvm_set_rflags(vcpu, ctxt->eflags);
vcpu             8836 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             8841 arch/x86/kvm/x86.c static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu             8861 arch/x86/kvm/x86.c 	return kvm_valid_cr4(vcpu, sregs->cr4);
vcpu             8864 arch/x86/kvm/x86.c static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu             8873 arch/x86/kvm/x86.c 	if (kvm_valid_sregs(vcpu, sregs))
vcpu             8878 arch/x86/kvm/x86.c 	if (kvm_set_apic_base(vcpu, &apic_base_msr))
vcpu             8883 arch/x86/kvm/x86.c 	kvm_x86_ops->set_idt(vcpu, &dt);
vcpu             8886 arch/x86/kvm/x86.c 	kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu             8888 arch/x86/kvm/x86.c 	vcpu->arch.cr2 = sregs->cr2;
vcpu             8889 arch/x86/kvm/x86.c 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu             8890 arch/x86/kvm/x86.c 	vcpu->arch.cr3 = sregs->cr3;
vcpu             8891 arch/x86/kvm/x86.c 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu             8893 arch/x86/kvm/x86.c 	kvm_set_cr8(vcpu, sregs->cr8);
vcpu             8895 arch/x86/kvm/x86.c 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
vcpu             8896 arch/x86/kvm/x86.c 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
vcpu             8898 arch/x86/kvm/x86.c 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
vcpu             8899 arch/x86/kvm/x86.c 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu             8900 arch/x86/kvm/x86.c 	vcpu->arch.cr0 = sregs->cr0;
vcpu             8902 arch/x86/kvm/x86.c 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
vcpu             8903 arch/x86/kvm/x86.c 	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
vcpu             8905 arch/x86/kvm/x86.c 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
vcpu             8907 arch/x86/kvm/x86.c 		kvm_update_cpuid(vcpu);
vcpu             8909 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             8910 arch/x86/kvm/x86.c 	if (is_pae_paging(vcpu)) {
vcpu             8911 arch/x86/kvm/x86.c 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
vcpu             8914 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             8917 arch/x86/kvm/x86.c 		kvm_mmu_reset_context(vcpu);
vcpu             8923 arch/x86/kvm/x86.c 		kvm_queue_interrupt(vcpu, pending_vec, false);
vcpu             8927 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
vcpu             8928 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
vcpu             8929 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
vcpu             8930 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
vcpu             8931 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
vcpu             8932 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
vcpu             8934 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
vcpu             8935 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
vcpu             8937 arch/x86/kvm/x86.c 	update_cr8_intercept(vcpu);
vcpu             8940 arch/x86/kvm/x86.c 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
vcpu             8942 arch/x86/kvm/x86.c 	    !is_protmode(vcpu))
vcpu             8943 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu             8945 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             8952 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu             8957 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8958 arch/x86/kvm/x86.c 	ret = __set_sregs(vcpu, sregs);
vcpu             8959 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             8963 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu             8969 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             8973 arch/x86/kvm/x86.c 		if (vcpu->arch.exception.pending)
vcpu             8976 arch/x86/kvm/x86.c 			kvm_queue_exception(vcpu, DB_VECTOR);
vcpu             8978 arch/x86/kvm/x86.c 			kvm_queue_exception(vcpu, BP_VECTOR);
vcpu             8985 arch/x86/kvm/x86.c 	rflags = kvm_get_rflags(vcpu);
vcpu             8987 arch/x86/kvm/x86.c 	vcpu->guest_debug = dbg->control;
vcpu             8988 arch/x86/kvm/x86.c 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
vcpu             8989 arch/x86/kvm/x86.c 		vcpu->guest_debug = 0;
vcpu             8991 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu             8993 arch/x86/kvm/x86.c 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
vcpu             8994 arch/x86/kvm/x86.c 		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
vcpu             8997 arch/x86/kvm/x86.c 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
vcpu             8999 arch/x86/kvm/x86.c 	kvm_update_dr7(vcpu);
vcpu             9001 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu             9002 arch/x86/kvm/x86.c 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
vcpu             9003 arch/x86/kvm/x86.c 			get_segment_base(vcpu, VCPU_SREG_CS);
vcpu             9009 arch/x86/kvm/x86.c 	kvm_set_rflags(vcpu, rflags);
vcpu             9011 arch/x86/kvm/x86.c 	kvm_x86_ops->update_bp_intercept(vcpu);
vcpu             9016 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9023 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu             9030 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9032 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             9033 arch/x86/kvm/x86.c 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
vcpu             9034 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             9040 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9044 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             9048 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9050 arch/x86/kvm/x86.c 	fxsave = &vcpu->arch.guest_fpu->state.fxsave;
vcpu             9060 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9064 arch/x86/kvm/x86.c int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu             9068 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9070 arch/x86/kvm/x86.c 	fxsave = &vcpu->arch.guest_fpu->state.fxsave;
vcpu             9081 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9085 arch/x86/kvm/x86.c static void store_regs(struct kvm_vcpu *vcpu)
vcpu             9089 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
vcpu             9090 arch/x86/kvm/x86.c 		__get_regs(vcpu, &vcpu->run->s.regs.regs);
vcpu             9092 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
vcpu             9093 arch/x86/kvm/x86.c 		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);
vcpu             9095 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
vcpu             9097 arch/x86/kvm/x86.c 				vcpu, &vcpu->run->s.regs.events);
vcpu             9100 arch/x86/kvm/x86.c static int sync_regs(struct kvm_vcpu *vcpu)
vcpu             9102 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
vcpu             9105 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
vcpu             9106 arch/x86/kvm/x86.c 		__set_regs(vcpu, &vcpu->run->s.regs.regs);
vcpu             9107 arch/x86/kvm/x86.c 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
vcpu             9109 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
vcpu             9110 arch/x86/kvm/x86.c 		if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
vcpu             9112 arch/x86/kvm/x86.c 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
vcpu             9114 arch/x86/kvm/x86.c 	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
vcpu             9116 arch/x86/kvm/x86.c 				vcpu, &vcpu->run->s.regs.events))
vcpu             9118 arch/x86/kvm/x86.c 		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
vcpu             9124 arch/x86/kvm/x86.c static void fx_init(struct kvm_vcpu *vcpu)
vcpu             9126 arch/x86/kvm/x86.c 	fpstate_init(&vcpu->arch.guest_fpu->state);
vcpu             9128 arch/x86/kvm/x86.c 		vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
vcpu             9134 arch/x86/kvm/x86.c 	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
vcpu             9136 arch/x86/kvm/x86.c 	vcpu->arch.cr0 |= X86_CR0_ET;
vcpu             9139 arch/x86/kvm/x86.c void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
vcpu             9141 arch/x86/kvm/x86.c 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
vcpu             9142 arch/x86/kvm/x86.c 	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
vcpu             9146 arch/x86/kvm/x86.c 	kvmclock_reset(vcpu);
vcpu             9148 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_free(vcpu);
vcpu             9155 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             9162 arch/x86/kvm/x86.c 	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
vcpu             9164 arch/x86/kvm/x86.c 	return vcpu;
vcpu             9167 arch/x86/kvm/x86.c int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu             9169 arch/x86/kvm/x86.c 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
vcpu             9170 arch/x86/kvm/x86.c 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
vcpu             9171 arch/x86/kvm/x86.c 	kvm_vcpu_mtrr_init(vcpu);
vcpu             9172 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9173 arch/x86/kvm/x86.c 	kvm_vcpu_reset(vcpu, false);
vcpu             9174 arch/x86/kvm/x86.c 	kvm_init_mmu(vcpu, false);
vcpu             9175 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9179 arch/x86/kvm/x86.c void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu             9182 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
vcpu             9184 arch/x86/kvm/x86.c 	kvm_hv_vcpu_postcreate(vcpu);
vcpu             9186 arch/x86/kvm/x86.c 	if (mutex_lock_killable(&vcpu->mutex))
vcpu             9188 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9192 arch/x86/kvm/x86.c 	kvm_write_tsc(vcpu, &msr);
vcpu             9193 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9196 arch/x86/kvm/x86.c 	vcpu->arch.msr_kvm_poll_control = 1;
vcpu             9198 arch/x86/kvm/x86.c 	mutex_unlock(&vcpu->mutex);
vcpu             9207 arch/x86/kvm/x86.c void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu             9209 arch/x86/kvm/x86.c 	kvm_arch_vcpu_free(vcpu);
vcpu             9212 arch/x86/kvm/x86.c void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu             9214 arch/x86/kvm/x86.c 	kvm_lapic_reset(vcpu, init_event);
vcpu             9216 arch/x86/kvm/x86.c 	vcpu->arch.hflags = 0;
vcpu             9218 arch/x86/kvm/x86.c 	vcpu->arch.smi_pending = 0;
vcpu             9219 arch/x86/kvm/x86.c 	vcpu->arch.smi_count = 0;
vcpu             9220 arch/x86/kvm/x86.c 	atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu             9221 arch/x86/kvm/x86.c 	vcpu->arch.nmi_pending = 0;
vcpu             9222 arch/x86/kvm/x86.c 	vcpu->arch.nmi_injected = false;
vcpu             9223 arch/x86/kvm/x86.c 	kvm_clear_interrupt_queue(vcpu);
vcpu             9224 arch/x86/kvm/x86.c 	kvm_clear_exception_queue(vcpu);
vcpu             9225 arch/x86/kvm/x86.c 	vcpu->arch.exception.pending = false;
vcpu             9227 arch/x86/kvm/x86.c 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu             9228 arch/x86/kvm/x86.c 	kvm_update_dr0123(vcpu);
vcpu             9229 arch/x86/kvm/x86.c 	vcpu->arch.dr6 = DR6_INIT;
vcpu             9230 arch/x86/kvm/x86.c 	kvm_update_dr6(vcpu);
vcpu             9231 arch/x86/kvm/x86.c 	vcpu->arch.dr7 = DR7_FIXED_1;
vcpu             9232 arch/x86/kvm/x86.c 	kvm_update_dr7(vcpu);
vcpu             9234 arch/x86/kvm/x86.c 	vcpu->arch.cr2 = 0;
vcpu             9236 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             9237 arch/x86/kvm/x86.c 	vcpu->arch.apf.msr_val = 0;
vcpu             9238 arch/x86/kvm/x86.c 	vcpu->arch.st.msr_val = 0;
vcpu             9240 arch/x86/kvm/x86.c 	kvmclock_reset(vcpu);
vcpu             9242 arch/x86/kvm/x86.c 	kvm_clear_async_pf_completion_queue(vcpu);
vcpu             9243 arch/x86/kvm/x86.c 	kvm_async_pf_hash_reset(vcpu);
vcpu             9244 arch/x86/kvm/x86.c 	vcpu->arch.apf.halted = false;
vcpu             9254 arch/x86/kvm/x86.c 			kvm_put_guest_fpu(vcpu);
vcpu             9255 arch/x86/kvm/x86.c 		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
vcpu             9259 arch/x86/kvm/x86.c 		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
vcpu             9264 arch/x86/kvm/x86.c 			kvm_load_guest_fpu(vcpu);
vcpu             9268 arch/x86/kvm/x86.c 		kvm_pmu_reset(vcpu);
vcpu             9269 arch/x86/kvm/x86.c 		vcpu->arch.smbase = 0x30000;
vcpu             9271 arch/x86/kvm/x86.c 		vcpu->arch.msr_misc_features_enables = 0;
vcpu             9273 arch/x86/kvm/x86.c 		vcpu->arch.xcr0 = XFEATURE_MASK_FP;
vcpu             9276 arch/x86/kvm/x86.c 	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu             9277 arch/x86/kvm/x86.c 	vcpu->arch.regs_avail = ~0;
vcpu             9278 arch/x86/kvm/x86.c 	vcpu->arch.regs_dirty = ~0;
vcpu             9280 arch/x86/kvm/x86.c 	vcpu->arch.ia32_xss = 0;
vcpu             9282 arch/x86/kvm/x86.c 	kvm_x86_ops->vcpu_reset(vcpu, init_event);
vcpu             9285 arch/x86/kvm/x86.c void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
vcpu             9289 arch/x86/kvm/x86.c 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             9292 arch/x86/kvm/x86.c 	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
vcpu             9293 arch/x86/kvm/x86.c 	kvm_rip_write(vcpu, 0);
vcpu             9299 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             9314 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             9315 arch/x86/kvm/x86.c 			if (!stable && vcpu->cpu == smp_processor_id())
vcpu             9316 arch/x86/kvm/x86.c 				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
vcpu             9317 arch/x86/kvm/x86.c 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
vcpu             9319 arch/x86/kvm/x86.c 				if (vcpu->arch.last_host_tsc > max_tsc)
vcpu             9320 arch/x86/kvm/x86.c 					max_tsc = vcpu->arch.last_host_tsc;
vcpu             9367 arch/x86/kvm/x86.c 			kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             9368 arch/x86/kvm/x86.c 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu             9369 arch/x86/kvm/x86.c 				vcpu->arch.last_host_tsc = local_tsc;
vcpu             9370 arch/x86/kvm/x86.c 				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
vcpu             9431 arch/x86/kvm/x86.c bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
vcpu             9433 arch/x86/kvm/x86.c 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
vcpu             9437 arch/x86/kvm/x86.c bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
vcpu             9439 arch/x86/kvm/x86.c 	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
vcpu             9445 arch/x86/kvm/x86.c int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu             9450 arch/x86/kvm/x86.c 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
vcpu             9451 arch/x86/kvm/x86.c 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu             9452 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu             9454 arch/x86/kvm/x86.c 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
vcpu             9461 arch/x86/kvm/x86.c 	vcpu->arch.pio_data = page_address(page);
vcpu             9463 arch/x86/kvm/x86.c 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
vcpu             9465 arch/x86/kvm/x86.c 	r = kvm_mmu_create(vcpu);
vcpu             9469 arch/x86/kvm/x86.c 	if (irqchip_in_kernel(vcpu->kvm)) {
vcpu             9470 arch/x86/kvm/x86.c 		vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
vcpu             9471 arch/x86/kvm/x86.c 		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
vcpu             9477 arch/x86/kvm/x86.c 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
vcpu             9479 arch/x86/kvm/x86.c 	if (!vcpu->arch.mce_banks) {
vcpu             9483 arch/x86/kvm/x86.c 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
vcpu             9485 arch/x86/kvm/x86.c 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
vcpu             9491 arch/x86/kvm/x86.c 	fx_init(vcpu);
vcpu             9493 arch/x86/kvm/x86.c 	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
vcpu             9495 arch/x86/kvm/x86.c 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu             9497 arch/x86/kvm/x86.c 	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
vcpu             9499 arch/x86/kvm/x86.c 	kvm_async_pf_hash_reset(vcpu);
vcpu             9500 arch/x86/kvm/x86.c 	kvm_pmu_init(vcpu);
vcpu             9502 arch/x86/kvm/x86.c 	vcpu->arch.pending_external_vector = -1;
vcpu             9503 arch/x86/kvm/x86.c 	vcpu->arch.preempted_in_kernel = false;
vcpu             9505 arch/x86/kvm/x86.c 	kvm_hv_vcpu_init(vcpu);
vcpu             9510 arch/x86/kvm/x86.c 	kfree(vcpu->arch.mce_banks);
vcpu             9512 arch/x86/kvm/x86.c 	kvm_free_lapic(vcpu);
vcpu             9514 arch/x86/kvm/x86.c 	kvm_mmu_destroy(vcpu);
vcpu             9516 arch/x86/kvm/x86.c 	free_page((unsigned long)vcpu->arch.pio_data);
vcpu             9521 arch/x86/kvm/x86.c void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu             9525 arch/x86/kvm/x86.c 	kvm_hv_vcpu_uninit(vcpu);
vcpu             9526 arch/x86/kvm/x86.c 	kvm_pmu_destroy(vcpu);
vcpu             9527 arch/x86/kvm/x86.c 	kfree(vcpu->arch.mce_banks);
vcpu             9528 arch/x86/kvm/x86.c 	kvm_free_lapic(vcpu);
vcpu             9529 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             9530 arch/x86/kvm/x86.c 	kvm_mmu_destroy(vcpu);
vcpu             9531 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             9532 arch/x86/kvm/x86.c 	free_page((unsigned long)vcpu->arch.pio_data);
vcpu             9533 arch/x86/kvm/x86.c 	if (!lapic_in_kernel(vcpu))
vcpu             9537 arch/x86/kvm/x86.c void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
vcpu             9539 arch/x86/kvm/x86.c 	vcpu->arch.l1tf_flush_l1d = true;
vcpu             9540 arch/x86/kvm/x86.c 	kvm_x86_ops->sched_in(vcpu, cpu);
vcpu             9585 arch/x86/kvm/x86.c static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
vcpu             9587 arch/x86/kvm/x86.c 	vcpu_load(vcpu);
vcpu             9588 arch/x86/kvm/x86.c 	kvm_mmu_unload(vcpu);
vcpu             9589 arch/x86/kvm/x86.c 	vcpu_put(vcpu);
vcpu             9595 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             9600 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             9601 arch/x86/kvm/x86.c 		kvm_clear_async_pf_completion_queue(vcpu);
vcpu             9602 arch/x86/kvm/x86.c 		kvm_unload_vcpu_mmu(vcpu);
vcpu             9604 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             9605 arch/x86/kvm/x86.c 		kvm_arch_vcpu_free(vcpu);
vcpu             9812 arch/x86/kvm/x86.c 	struct kvm_vcpu *vcpu;
vcpu             9822 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu             9823 arch/x86/kvm/x86.c 		kvm_vcpu_kick(vcpu);
vcpu             9945 arch/x86/kvm/x86.c static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
vcpu             9947 arch/x86/kvm/x86.c 	return (is_guest_mode(vcpu) &&
vcpu             9949 arch/x86/kvm/x86.c 			kvm_x86_ops->guest_apic_has_interrupt(vcpu));
vcpu             9952 arch/x86/kvm/x86.c static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
vcpu             9954 arch/x86/kvm/x86.c 	if (!list_empty_careful(&vcpu->async_pf.done))
vcpu             9957 arch/x86/kvm/x86.c 	if (kvm_apic_has_events(vcpu))
vcpu             9960 arch/x86/kvm/x86.c 	if (vcpu->arch.pv.pv_unhalted)
vcpu             9963 arch/x86/kvm/x86.c 	if (vcpu->arch.exception.pending)
vcpu             9966 arch/x86/kvm/x86.c 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
vcpu             9967 arch/x86/kvm/x86.c 	    (vcpu->arch.nmi_pending &&
vcpu             9968 arch/x86/kvm/x86.c 	     kvm_x86_ops->nmi_allowed(vcpu)))
vcpu             9971 arch/x86/kvm/x86.c 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
vcpu             9972 arch/x86/kvm/x86.c 	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
vcpu             9975 arch/x86/kvm/x86.c 	if (kvm_arch_interrupt_allowed(vcpu) &&
vcpu             9976 arch/x86/kvm/x86.c 	    (kvm_cpu_has_interrupt(vcpu) ||
vcpu             9977 arch/x86/kvm/x86.c 	    kvm_guest_apic_has_interrupt(vcpu)))
vcpu             9980 arch/x86/kvm/x86.c 	if (kvm_hv_has_stimer_pending(vcpu))
vcpu             9986 arch/x86/kvm/x86.c int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
vcpu             9988 arch/x86/kvm/x86.c 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
vcpu             9991 arch/x86/kvm/x86.c bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
vcpu             9993 arch/x86/kvm/x86.c 	if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
vcpu             9996 arch/x86/kvm/x86.c 	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
vcpu             9997 arch/x86/kvm/x86.c 		kvm_test_request(KVM_REQ_SMI, vcpu) ||
vcpu             9998 arch/x86/kvm/x86.c 		 kvm_test_request(KVM_REQ_EVENT, vcpu))
vcpu             10001 arch/x86/kvm/x86.c 	if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
vcpu             10007 arch/x86/kvm/x86.c bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
vcpu             10009 arch/x86/kvm/x86.c 	return vcpu->arch.preempted_in_kernel;
vcpu             10012 arch/x86/kvm/x86.c int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
vcpu             10014 arch/x86/kvm/x86.c 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
vcpu             10017 arch/x86/kvm/x86.c int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
vcpu             10019 arch/x86/kvm/x86.c 	return kvm_x86_ops->interrupt_allowed(vcpu);
vcpu             10022 arch/x86/kvm/x86.c unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
vcpu             10024 arch/x86/kvm/x86.c 	if (is_64_bit_mode(vcpu))
vcpu             10025 arch/x86/kvm/x86.c 		return kvm_rip_read(vcpu);
vcpu             10026 arch/x86/kvm/x86.c 	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
vcpu             10027 arch/x86/kvm/x86.c 		     kvm_rip_read(vcpu));
vcpu             10031 arch/x86/kvm/x86.c bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
vcpu             10033 arch/x86/kvm/x86.c 	return kvm_get_linear_rip(vcpu) == linear_rip;
vcpu             10037 arch/x86/kvm/x86.c unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
vcpu             10041 arch/x86/kvm/x86.c 	rflags = kvm_x86_ops->get_rflags(vcpu);
vcpu             10042 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu             10048 arch/x86/kvm/x86.c static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vcpu             10050 arch/x86/kvm/x86.c 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
vcpu             10051 arch/x86/kvm/x86.c 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
vcpu             10053 arch/x86/kvm/x86.c 	kvm_x86_ops->set_rflags(vcpu, rflags);
vcpu             10056 arch/x86/kvm/x86.c void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vcpu             10058 arch/x86/kvm/x86.c 	__kvm_set_rflags(vcpu, rflags);
vcpu             10059 arch/x86/kvm/x86.c 	kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu             10063 arch/x86/kvm/x86.c void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
vcpu             10067 arch/x86/kvm/x86.c 	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
vcpu             10071 arch/x86/kvm/x86.c 	r = kvm_mmu_reload(vcpu);
vcpu             10075 arch/x86/kvm/x86.c 	if (!vcpu->arch.mmu->direct_map &&
vcpu             10076 arch/x86/kvm/x86.c 	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
vcpu             10079 arch/x86/kvm/x86.c 	vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
vcpu             10092 arch/x86/kvm/x86.c static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             10096 arch/x86/kvm/x86.c 	while (vcpu->arch.apf.gfns[key] != ~0)
vcpu             10099 arch/x86/kvm/x86.c 	vcpu->arch.apf.gfns[key] = gfn;
vcpu             10102 arch/x86/kvm/x86.c static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             10108 arch/x86/kvm/x86.c 		     (vcpu->arch.apf.gfns[key] != gfn &&
vcpu             10109 arch/x86/kvm/x86.c 		      vcpu->arch.apf.gfns[key] != ~0); i++)
vcpu             10115 arch/x86/kvm/x86.c bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             10117 arch/x86/kvm/x86.c 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
vcpu             10120 arch/x86/kvm/x86.c static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             10124 arch/x86/kvm/x86.c 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
vcpu             10126 arch/x86/kvm/x86.c 		vcpu->arch.apf.gfns[i] = ~0;
vcpu             10129 arch/x86/kvm/x86.c 			if (vcpu->arch.apf.gfns[j] == ~0)
vcpu             10131 arch/x86/kvm/x86.c 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
vcpu             10138 arch/x86/kvm/x86.c 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
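The kvm_add_async_pf_gfn(), kvm_async_pf_gfn_slot() and kvm_del_async_pf_gfn() fragments above belong to an open-addressed hash keyed by gfn: ~0 marks a free slot, lookup probes linearly, and deletion re-packs the probe cluster instead of leaving tombstones. Below is a minimal stand-alone sketch of that scheme, not the in-tree code; the 64-entry table size, the hash_gfn() helper and the reset_gfns() initializer are illustrative assumptions.

#include <stdint.h>
#include <stdbool.h>

#define APF_HASH_SIZE 64			/* assumed power-of-two table size */
typedef uint64_t gfn_t;

static gfn_t gfns[APF_HASH_SIZE];

static void reset_gfns(void)			/* table must start out all-empty */
{
	int i;

	for (i = 0; i < APF_HASH_SIZE; i++)
		gfns[i] = ~0ULL;
}

static uint32_t hash_gfn(gfn_t gfn)		/* hypothetical hash helper */
{
	return (uint32_t)(gfn * 0x9E3779B97F4A7C15ULL) & (APF_HASH_SIZE - 1);
}

static void add_gfn(gfn_t gfn)
{
	uint32_t key = hash_gfn(gfn);

	while (gfns[key] != ~0ULL)		/* linear probe to a free slot; caller
						 * must keep the table from filling up */
		key = (key + 1) & (APF_HASH_SIZE - 1);
	gfns[key] = gfn;
}

static uint32_t gfn_slot(gfn_t gfn)
{
	uint32_t key = hash_gfn(gfn);
	int i;

	for (i = 0; i < APF_HASH_SIZE &&
		    gfns[key] != gfn && gfns[key] != ~0ULL; i++)
		key = (key + 1) & (APF_HASH_SIZE - 1);
	return key;
}

static bool find_gfn(gfn_t gfn)
{
	return gfns[gfn_slot(gfn)] == gfn;
}

static void del_gfn(gfn_t gfn)
{
	uint32_t i, j, k;

	i = j = gfn_slot(gfn);
	while (true) {
		gfns[i] = ~0ULL;		/* empty the slot being vacated */
		do {
			j = (j + 1) & (APF_HASH_SIZE - 1);
			if (gfns[j] == ~0ULL)
				return;		/* hit the end of the probe cluster */
			k = hash_gfn(gfns[j]);
			/* keep walking while entry j still reaches its home slot k
			 * without passing through the hole at i (cyclically) */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		gfns[i] = gfns[j];		/* pull the displaced entry back */
		i = j;				/* and repeat with the new hole */
	}
}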
vcpu             10143 arch/x86/kvm/x86.c static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
vcpu             10146 arch/x86/kvm/x86.c 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
vcpu             10150 arch/x86/kvm/x86.c static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
vcpu             10153 arch/x86/kvm/x86.c 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
vcpu             10157 arch/x86/kvm/x86.c static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
vcpu             10159 arch/x86/kvm/x86.c 	if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
vcpu             10162 arch/x86/kvm/x86.c 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
vcpu             10163 arch/x86/kvm/x86.c 	    (vcpu->arch.apf.send_user_only &&
vcpu             10164 arch/x86/kvm/x86.c 	     kvm_x86_ops->get_cpl(vcpu) == 0))
vcpu             10170 arch/x86/kvm/x86.c bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
vcpu             10172 arch/x86/kvm/x86.c 	if (unlikely(!lapic_in_kernel(vcpu) ||
vcpu             10173 arch/x86/kvm/x86.c 		     kvm_event_needs_reinjection(vcpu) ||
vcpu             10174 arch/x86/kvm/x86.c 		     vcpu->arch.exception.pending))
vcpu             10177 arch/x86/kvm/x86.c 	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
vcpu             10184 arch/x86/kvm/x86.c 	return kvm_x86_ops->interrupt_allowed(vcpu);
vcpu             10187 arch/x86/kvm/x86.c void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
vcpu             10193 arch/x86/kvm/x86.c 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
vcpu             10195 arch/x86/kvm/x86.c 	if (kvm_can_deliver_async_pf(vcpu) &&
vcpu             10196 arch/x86/kvm/x86.c 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
vcpu             10203 arch/x86/kvm/x86.c 		kvm_inject_page_fault(vcpu, &fault);
vcpu             10213 arch/x86/kvm/x86.c 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
vcpu             10217 arch/x86/kvm/x86.c void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu             10226 arch/x86/kvm/x86.c 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
vcpu             10229 arch/x86/kvm/x86.c 	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
vcpu             10230 arch/x86/kvm/x86.c 	    !apf_get_user(vcpu, &val)) {
vcpu             10232 arch/x86/kvm/x86.c 		    vcpu->arch.exception.pending &&
vcpu             10233 arch/x86/kvm/x86.c 		    vcpu->arch.exception.nr == PF_VECTOR &&
vcpu             10234 arch/x86/kvm/x86.c 		    !apf_put_user(vcpu, 0)) {
vcpu             10235 arch/x86/kvm/x86.c 			vcpu->arch.exception.injected = false;
vcpu             10236 arch/x86/kvm/x86.c 			vcpu->arch.exception.pending = false;
vcpu             10237 arch/x86/kvm/x86.c 			vcpu->arch.exception.nr = 0;
vcpu             10238 arch/x86/kvm/x86.c 			vcpu->arch.exception.has_error_code = false;
vcpu             10239 arch/x86/kvm/x86.c 			vcpu->arch.exception.error_code = 0;
vcpu             10240 arch/x86/kvm/x86.c 			vcpu->arch.exception.has_payload = false;
vcpu             10241 arch/x86/kvm/x86.c 			vcpu->arch.exception.payload = 0;
vcpu             10242 arch/x86/kvm/x86.c 		} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
vcpu             10249 arch/x86/kvm/x86.c 			kvm_inject_page_fault(vcpu, &fault);
vcpu             10252 arch/x86/kvm/x86.c 	vcpu->arch.apf.halted = false;
vcpu             10253 arch/x86/kvm/x86.c 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
vcpu             10256 arch/x86/kvm/x86.c bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
vcpu             10258 arch/x86/kvm/x86.c 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
vcpu             10261 arch/x86/kvm/x86.c 		return kvm_can_do_async_pf(vcpu);
vcpu             10351 arch/x86/kvm/x86.c bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
vcpu             10353 arch/x86/kvm/x86.c 	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
vcpu               49 arch/x86/kvm/x86.h static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
vcpu               51 arch/x86/kvm/x86.h 	vcpu->arch.exception.pending = false;
vcpu               52 arch/x86/kvm/x86.h 	vcpu->arch.exception.injected = false;
vcpu               55 arch/x86/kvm/x86.h static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
vcpu               58 arch/x86/kvm/x86.h 	vcpu->arch.interrupt.injected = true;
vcpu               59 arch/x86/kvm/x86.h 	vcpu->arch.interrupt.soft = soft;
vcpu               60 arch/x86/kvm/x86.h 	vcpu->arch.interrupt.nr = vector;
vcpu               63 arch/x86/kvm/x86.h static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
vcpu               65 arch/x86/kvm/x86.h 	vcpu->arch.interrupt.injected = false;
vcpu               68 arch/x86/kvm/x86.h static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
vcpu               70 arch/x86/kvm/x86.h 	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
vcpu               71 arch/x86/kvm/x86.h 		vcpu->arch.nmi_injected;
vcpu               79 arch/x86/kvm/x86.h static inline bool is_protmode(struct kvm_vcpu *vcpu)
vcpu               81 arch/x86/kvm/x86.h 	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
vcpu               84 arch/x86/kvm/x86.h static inline int is_long_mode(struct kvm_vcpu *vcpu)
vcpu               87 arch/x86/kvm/x86.h 	return vcpu->arch.efer & EFER_LMA;
vcpu               93 arch/x86/kvm/x86.h static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
vcpu               97 arch/x86/kvm/x86.h 	if (!is_long_mode(vcpu))
vcpu               99 arch/x86/kvm/x86.h 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
vcpu              103 arch/x86/kvm/x86.h static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
vcpu              106 arch/x86/kvm/x86.h 	return (vcpu->arch.efer & EFER_LMA) &&
vcpu              107 arch/x86/kvm/x86.h 		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
vcpu              122 arch/x86/kvm/x86.h static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
vcpu              124 arch/x86/kvm/x86.h 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
vcpu              127 arch/x86/kvm/x86.h static inline int is_pae(struct kvm_vcpu *vcpu)
vcpu              129 arch/x86/kvm/x86.h 	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
vcpu              132 arch/x86/kvm/x86.h static inline int is_pse(struct kvm_vcpu *vcpu)
vcpu              134 arch/x86/kvm/x86.h 	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
vcpu              137 arch/x86/kvm/x86.h static inline int is_paging(struct kvm_vcpu *vcpu)
vcpu              139 arch/x86/kvm/x86.h 	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
vcpu              142 arch/x86/kvm/x86.h static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
vcpu              144 arch/x86/kvm/x86.h 	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
vcpu              152 arch/x86/kvm/x86.h static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
vcpu              154 arch/x86/kvm/x86.h 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
vcpu              167 arch/x86/kvm/x86.h static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
vcpu              170 arch/x86/kvm/x86.h 	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
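The is_noncanonical_address() and vcpu_virt_addr_bits() fragments above combine into the usual x86 canonicality test: sign-extend the address from the top implemented virtual-address bit and compare the result with the original value, where the width is 48 bits for 4-level paging and 57 bits with LA57. A self-contained illustration of that check follows; get_canonical() here is written in the common shift-and-sign-extend form and is a sketch, not the kernel's definition.

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static uint64_t get_canonical(uint64_t la, uint8_t vaddr_bits)
{
	/* Move the top valid bit into bit 63, then arithmetic-shift it back
	 * down so bits [63:vaddr_bits] become copies of bit (vaddr_bits - 1). */
	return (uint64_t)((int64_t)(la << (64 - vaddr_bits)) >> (64 - vaddr_bits));
}

static bool is_noncanonical(uint64_t la, uint8_t vaddr_bits)
{
	return get_canonical(la, vaddr_bits) != la;
}

int main(void)
{
	assert(!is_noncanonical(0x00007fffffffffffULL, 48)); /* top of lower half */
	assert(!is_noncanonical(0xffff800000000000ULL, 48)); /* bottom of upper half */
	assert(is_noncanonical(0x0000800000000000ULL, 48));  /* inside the hole */
	return 0;
}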
vcpu              186 arch/x86/kvm/x86.h static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
vcpu              189 arch/x86/kvm/x86.h 	u64 gen = kvm_memslots(vcpu->kvm)->generation;
vcpu              198 arch/x86/kvm/x86.h 	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
vcpu              199 arch/x86/kvm/x86.h 	vcpu->arch.mmio_access = access;
vcpu              200 arch/x86/kvm/x86.h 	vcpu->arch.mmio_gfn = gfn;
vcpu              201 arch/x86/kvm/x86.h 	vcpu->arch.mmio_gen = gen;
vcpu              204 arch/x86/kvm/x86.h static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
vcpu              206 arch/x86/kvm/x86.h 	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
vcpu              215 arch/x86/kvm/x86.h static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
vcpu              217 arch/x86/kvm/x86.h 	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
vcpu              220 arch/x86/kvm/x86.h 	vcpu->arch.mmio_gva = 0;
vcpu              223 arch/x86/kvm/x86.h static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
vcpu              225 arch/x86/kvm/x86.h 	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
vcpu              226 arch/x86/kvm/x86.h 	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
vcpu              232 arch/x86/kvm/x86.h static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
vcpu              234 arch/x86/kvm/x86.h 	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
vcpu              235 arch/x86/kvm/x86.h 	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
vcpu              241 arch/x86/kvm/x86.h static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
vcpu              244 arch/x86/kvm/x86.h 	unsigned long val = kvm_register_read(vcpu, reg);
vcpu              246 arch/x86/kvm/x86.h 	return is_64_bit_mode(vcpu) ? val : (u32)val;
vcpu              249 arch/x86/kvm/x86.h static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
vcpu              253 arch/x86/kvm/x86.h 	if (!is_64_bit_mode(vcpu))
vcpu              255 arch/x86/kvm/x86.h 	return kvm_register_write(vcpu, reg, val);
vcpu              263 arch/x86/kvm/x86.h void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
vcpu              264 arch/x86/kvm/x86.h void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
vcpu              266 arch/x86/kvm/x86.h void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
vcpu              269 arch/x86/kvm/x86.h int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
vcpu              273 arch/x86/kvm/x86.h int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
vcpu              277 arch/x86/kvm/x86.h int handle_ud(struct kvm_vcpu *vcpu);
vcpu              279 arch/x86/kvm/x86.h void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
vcpu              281 arch/x86/kvm/x86.h void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
vcpu              282 arch/x86/kvm/x86.h u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              283 arch/x86/kvm/x86.h bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
vcpu              284 arch/x86/kvm/x86.h int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
vcpu              285 arch/x86/kvm/x86.h int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
vcpu              286 arch/x86/kvm/x86.h bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu              289 arch/x86/kvm/x86.h int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu              308 arch/x86/kvm/x86.h static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
vcpu              310 arch/x86/kvm/x86.h 	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
vcpu              311 arch/x86/kvm/x86.h 				   vcpu->arch.virtual_tsc_shift);
vcpu              350 arch/x86/kvm/x86.h static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
vcpu              352 arch/x86/kvm/x86.h 	__this_cpu_write(current_vcpu, vcpu);
vcpu              355 arch/x86/kvm/x86.h static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
vcpu              369 arch/x86/kvm/x86.h void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
vcpu              370 arch/x86/kvm/x86.h void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
vcpu               29 arch/x86/xen/irq.c 	struct vcpu_info *vcpu;
vcpu               32 arch/x86/xen/irq.c 	vcpu = this_cpu_read(xen_vcpu);
vcpu               35 arch/x86/xen/irq.c 	flags = !vcpu->evtchn_upcall_mask;
vcpu               47 arch/x86/xen/irq.c 	struct vcpu_info *vcpu;
vcpu               54 arch/x86/xen/irq.c 	vcpu = this_cpu_read(xen_vcpu);
vcpu               55 arch/x86/xen/irq.c 	vcpu->evtchn_upcall_mask = flags;
vcpu               59 arch/x86/xen/irq.c 		if (unlikely(vcpu->evtchn_upcall_pending))
vcpu               80 arch/x86/xen/irq.c 	struct vcpu_info *vcpu;
vcpu               89 arch/x86/xen/irq.c 	vcpu = this_cpu_read(xen_vcpu);
vcpu               90 arch/x86/xen/irq.c 	vcpu->evtchn_upcall_mask = 0;
vcpu               96 arch/x86/xen/irq.c 	if (unlikely(vcpu->evtchn_upcall_pending))
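The arch/x86/xen/irq.c fragments above all revolve around the per-vcpu evtchn_upcall_mask byte in the shared vcpu_info page: "interrupts enabled" means the mask is clear, and re-enabling must replay any upcall that became pending while masked. The following is a user-space model of that pattern only; the real code fetches vcpu_info with this_cpu_read(xen_vcpu) and kicks the hypervisor to deliver the pending event, which is reduced to a stub here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_info {
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;	/* 1 = event delivery masked ("irqs off") */
};

static struct vcpu_info this_vcpu;	/* stand-in for this_cpu_read(xen_vcpu) */

static void force_evtchn_callback(void)	/* stub for the real upcall path */
{
	puts("deliver pending event now");
}

/* Non-zero return mimics "interrupts enabled" in the native flags convention. */
static unsigned long xen_save_fl(void)
{
	return !this_vcpu.evtchn_upcall_mask;
}

static void xen_irq_disable(void)
{
	this_vcpu.evtchn_upcall_mask = 1;
}

static void xen_irq_enable(void)
{
	this_vcpu.evtchn_upcall_mask = 0;
	/* Events may have arrived while we were masked; replay them now. */
	if (this_vcpu.evtchn_upcall_pending)
		force_evtchn_callback();
}

int main(void)
{
	xen_irq_disable();
	this_vcpu.evtchn_upcall_pending = 1;	/* event arrives while masked */
	printf("enabled? %lu\n", xen_save_fl());
	xen_irq_enable();			/* prints the replay message */
	return 0;
}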
vcpu              540 arch/x86/xen/pmu.c 	xp.vcpu = cpu;
vcpu              573 arch/x86/xen/pmu.c 	xp.vcpu = cpu;
vcpu               77 arch/x86/xen/xen-ops.h bool xen_vcpu_stolen(int vcpu);
vcpu             1702 drivers/gpu/drm/i915/gvt/kvmgt.c static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu              509 drivers/iommu/fsl_pamu.c u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
vcpu              537 drivers/iommu/fsl_pamu.c 			if (be32_to_cpup(&prop[i]) == vcpu) {
vcpu              576 drivers/iommu/fsl_pamu.c 		 stash_dest_hint, vcpu);
vcpu              394 drivers/iommu/fsl_pamu.h u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
vcpu              563 drivers/irqchip/irq-gic-v3.c static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
vcpu              565 drivers/irqchip/irq-gic-v3.c 	if (vcpu)
vcpu             1719 drivers/irqchip/irq-gic-v3.c 		gic_v3_kvm_info.vcpu = r;
vcpu             2029 drivers/irqchip/irq-gic-v3.c 		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
vcpu             2031 drivers/irqchip/irq-gic-v3.c 		vcpu->flags = IORESOURCE_MEM;
vcpu             2032 drivers/irqchip/irq-gic-v3.c 		vcpu->start = acpi_data.vcpu_base;
vcpu             2033 drivers/irqchip/irq-gic-v3.c 		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
vcpu              315 drivers/irqchip/irq-gic.c static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
vcpu              321 drivers/irqchip/irq-gic.c 	if (vcpu)
vcpu             1409 drivers/irqchip/irq-gic.c 	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
vcpu             1567 drivers/irqchip/irq-gic.c 	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
vcpu              275 drivers/s390/crypto/vfio_ap_ops.c static int handle_pqap(struct kvm_vcpu *vcpu)
vcpu              285 drivers/s390/crypto/vfio_ap_ops.c 	if (!(vcpu->arch.sie_block->eca & ECA_AIV))
vcpu              288 drivers/s390/crypto/vfio_ap_ops.c 	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
vcpu              291 drivers/s390/crypto/vfio_ap_ops.c 	if (!vcpu->kvm->arch.crypto.pqap_hook)
vcpu              293 drivers/s390/crypto/vfio_ap_ops.c 	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
vcpu              300 drivers/s390/crypto/vfio_ap_ops.c 	status = vcpu->run->s.regs.gprs[1];
vcpu              305 drivers/s390/crypto/vfio_ap_ops.c 					     vcpu->run->s.regs.gprs[2]);
vcpu              310 drivers/s390/crypto/vfio_ap_ops.c 	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
vcpu              311 drivers/s390/crypto/vfio_ap_ops.c 	vcpu->run->s.regs.gprs[1] >>= 32;
vcpu              888 drivers/xen/events/events_base.c 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
vcpu              941 drivers/xen/events/events_base.c 		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
vcpu              984 drivers/xen/events/events_base.c 		bind_virq.vcpu = xen_vcpu_nr(cpu);
vcpu             1310 drivers/xen/events/events_base.c 	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
vcpu             1463 drivers/xen/events/events_base.c 		bind_virq.vcpu = xen_vcpu_nr(cpu);
vcpu             1487 drivers/xen/events/events_base.c 		bind_ipi.vcpu = xen_vcpu_nr(cpu);
vcpu              116 drivers/xen/events/events_fifo.c 	init_control.vcpu        = xen_vcpu_nr(cpu);
vcpu              478 drivers/xen/evtchn.c 		bind_virq.vcpu = xen_vcpu_nr(0);
vcpu              147 drivers/xen/time.c bool xen_vcpu_stolen(int vcpu)
vcpu              149 drivers/xen/time.c 	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
vcpu              134 include/asm-generic/mshyperv.h 	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
vcpu              152 include/asm-generic/mshyperv.h 		vcpu = hv_cpu_number_to_vp_number(cpu);
vcpu              153 include/asm-generic/mshyperv.h 		if (vcpu == VP_INVAL)
vcpu              155 include/asm-generic/mshyperv.h 		vcpu_bank = vcpu / 64;
vcpu              156 include/asm-generic/mshyperv.h 		vcpu_offset = vcpu % 64;
vcpu               27 include/kvm/arm_arch_timer.h 	struct kvm_vcpu			*vcpu;
vcpu               71 include/kvm/arm_arch_timer.h int kvm_timer_enable(struct kvm_vcpu *vcpu);
vcpu               72 include/kvm/arm_arch_timer.h int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
vcpu               73 include/kvm/arm_arch_timer.h void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
vcpu               74 include/kvm/arm_arch_timer.h void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
vcpu               75 include/kvm/arm_arch_timer.h bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
vcpu               76 include/kvm/arm_arch_timer.h void kvm_timer_update_run(struct kvm_vcpu *vcpu);
vcpu               77 include/kvm/arm_arch_timer.h void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
vcpu               82 include/kvm/arm_arch_timer.h int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
vcpu               83 include/kvm/arm_arch_timer.h int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
vcpu               84 include/kvm/arm_arch_timer.h int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
vcpu               86 include/kvm/arm_arch_timer.h bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
vcpu               90 include/kvm/arm_arch_timer.h void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
vcpu               91 include/kvm/arm_arch_timer.h void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
vcpu              102 include/kvm/arm_arch_timer.h #define arch_timer_ctx_index(ctx)	((ctx) - vcpu_timer((ctx)->vcpu)->timers)
vcpu              104 include/kvm/arm_arch_timer.h u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
vcpu              107 include/kvm/arm_arch_timer.h void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
vcpu               34 include/kvm/arm_pmu.h u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
vcpu               35 include/kvm/arm_pmu.h void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
vcpu               36 include/kvm/arm_pmu.h u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
vcpu               37 include/kvm/arm_pmu.h void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
vcpu               38 include/kvm/arm_pmu.h void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
vcpu               39 include/kvm/arm_pmu.h void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
vcpu               40 include/kvm/arm_pmu.h void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
vcpu               41 include/kvm/arm_pmu.h void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
vcpu               42 include/kvm/arm_pmu.h void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
vcpu               43 include/kvm/arm_pmu.h void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
vcpu               44 include/kvm/arm_pmu.h bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
vcpu               45 include/kvm/arm_pmu.h void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
vcpu               46 include/kvm/arm_pmu.h void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
vcpu               47 include/kvm/arm_pmu.h void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
vcpu               48 include/kvm/arm_pmu.h void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
vcpu               51 include/kvm/arm_pmu.h int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
vcpu               53 include/kvm/arm_pmu.h int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
vcpu               55 include/kvm/arm_pmu.h int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
vcpu               57 include/kvm/arm_pmu.h int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
vcpu               64 include/kvm/arm_pmu.h static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
vcpu               69 include/kvm/arm_pmu.h static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
vcpu               71 include/kvm/arm_pmu.h static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
vcpu               75 include/kvm/arm_pmu.h static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
vcpu               76 include/kvm/arm_pmu.h static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
vcpu               77 include/kvm/arm_pmu.h static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
vcpu               78 include/kvm/arm_pmu.h static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
vcpu               79 include/kvm/arm_pmu.h static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
vcpu               80 include/kvm/arm_pmu.h static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
vcpu               81 include/kvm/arm_pmu.h static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
vcpu               82 include/kvm/arm_pmu.h static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
vcpu               86 include/kvm/arm_pmu.h static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
vcpu               87 include/kvm/arm_pmu.h static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
vcpu               88 include/kvm/arm_pmu.h static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
vcpu               89 include/kvm/arm_pmu.h static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
vcpu               92 include/kvm/arm_pmu.h static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
vcpu               97 include/kvm/arm_pmu.h static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
vcpu              102 include/kvm/arm_pmu.h static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
vcpu              107 include/kvm/arm_pmu.h static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
vcpu               23 include/kvm/arm_psci.h static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
vcpu               32 include/kvm/arm_psci.h 	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
vcpu               33 include/kvm/arm_psci.h 		if (vcpu->kvm->arch.psci_version)
vcpu               34 include/kvm/arm_psci.h 			return vcpu->kvm->arch.psci_version;
vcpu               43 include/kvm/arm_psci.h int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
vcpu               47 include/kvm/arm_psci.h int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
vcpu               48 include/kvm/arm_psci.h int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
vcpu               49 include/kvm/arm_psci.h int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu               50 include/kvm/arm_psci.h int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
vcpu               96 include/kvm/arm_vgic.h 	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
vcpu              336 include/kvm/arm_vgic.h int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
vcpu              339 include/kvm/arm_vgic.h void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
vcpu              346 include/kvm/arm_vgic.h int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
vcpu              348 include/kvm/arm_vgic.h int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
vcpu              349 include/kvm/arm_vgic.h bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
vcpu              351 include/kvm/arm_vgic.h int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
vcpu              353 include/kvm/arm_vgic.h void kvm_vgic_load(struct kvm_vcpu *vcpu);
vcpu              354 include/kvm/arm_vgic.h void kvm_vgic_put(struct kvm_vcpu *vcpu);
vcpu              355 include/kvm/arm_vgic.h void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
vcpu              363 include/kvm/arm_vgic.h bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
vcpu              364 include/kvm/arm_vgic.h void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
vcpu              365 include/kvm/arm_vgic.h void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
vcpu              366 include/kvm/arm_vgic.h void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
vcpu              368 include/kvm/arm_vgic.h void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
vcpu              389 include/kvm/arm_vgic.h int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
vcpu              399 include/kvm/arm_vgic.h void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
vcpu              400 include/kvm/arm_vgic.h void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
vcpu               18 include/kvm/iodev.h 	int (*read)(struct kvm_vcpu *vcpu,
vcpu               23 include/kvm/iodev.h 	int (*write)(struct kvm_vcpu *vcpu,
vcpu               42 include/kvm/iodev.h static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu,
vcpu               46 include/kvm/iodev.h 	return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v)
vcpu               50 include/kvm/iodev.h static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
vcpu               54 include/kvm/iodev.h 	return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v)
vcpu               28 include/linux/irqchip/arm-gic-common.h 	struct resource vcpu;
vcpu              187 include/linux/kvm_host.h int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
vcpu              189 include/linux/kvm_host.h int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
vcpu              191 include/linux/kvm_host.h int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
vcpu              205 include/linux/kvm_host.h 	struct kvm_vcpu *vcpu;
vcpu              213 include/linux/kvm_host.h void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
vcpu              214 include/linux/kvm_host.h void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
vcpu              215 include/linux/kvm_host.h int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu              217 include/linux/kvm_host.h int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
vcpu              326 include/linux/kvm_host.h static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
vcpu              334 include/linux/kvm_host.h 	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
vcpu              374 include/linux/kvm_host.h 	u32 vcpu;
vcpu              423 include/linux/kvm_host.h static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
vcpu              520 include/linux/kvm_host.h #define vcpu_unimpl(vcpu, fmt, ...)					\
vcpu              522 include/linux/kvm_host.h 			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)
vcpu              524 include/linux/kvm_host.h #define vcpu_debug(vcpu, fmt, ...)					\
vcpu              525 include/linux/kvm_host.h 	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
vcpu              526 include/linux/kvm_host.h #define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
vcpu              527 include/linux/kvm_host.h 	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
vcpu              529 include/linux/kvm_host.h #define vcpu_err(vcpu, fmt, ...)					\
vcpu              530 include/linux/kvm_host.h 	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
vcpu              557 include/linux/kvm_host.h 	struct kvm_vcpu *vcpu = NULL;
vcpu              563 include/linux/kvm_host.h 		vcpu = kvm_get_vcpu(kvm, id);
vcpu              564 include/linux/kvm_host.h 	if (vcpu && vcpu->vcpu_id == id)
vcpu              565 include/linux/kvm_host.h 		return vcpu;
vcpu              566 include/linux/kvm_host.h 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              567 include/linux/kvm_host.h 		if (vcpu->vcpu_id == id)
vcpu              568 include/linux/kvm_host.h 			return vcpu;
vcpu              572 include/linux/kvm_host.h static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
vcpu              577 include/linux/kvm_host.h 	kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
vcpu              578 include/linux/kvm_host.h 		if (tmp == vcpu)
vcpu              588 include/linux/kvm_host.h int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
vcpu              589 include/linux/kvm_host.h void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
vcpu              591 include/linux/kvm_host.h void vcpu_load(struct kvm_vcpu *vcpu);
vcpu              592 include/linux/kvm_host.h void vcpu_put(struct kvm_vcpu *vcpu);
vcpu              639 include/linux/kvm_host.h static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
vcpu              641 include/linux/kvm_host.h 	int as_id = kvm_arch_vcpu_memslots_id(vcpu);
vcpu              643 include/linux/kvm_host.h 	return __kvm_memslots(vcpu->kvm, as_id);
vcpu              754 include/linux/kvm_host.h unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              757 include/linux/kvm_host.h struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
vcpu              758 include/linux/kvm_host.h struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              759 include/linux/kvm_host.h kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              760 include/linux/kvm_host.h kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              761 include/linux/kvm_host.h int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
vcpu              762 include/linux/kvm_host.h int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
vcpu              764 include/linux/kvm_host.h struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              765 include/linux/kvm_host.h void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
vcpu              766 include/linux/kvm_host.h int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
vcpu              768 include/linux/kvm_host.h unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              769 include/linux/kvm_host.h unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
vcpu              770 include/linux/kvm_host.h int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
vcpu              772 include/linux/kvm_host.h int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
vcpu              774 include/linux/kvm_host.h int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
vcpu              776 include/linux/kvm_host.h int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
vcpu              778 include/linux/kvm_host.h int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
vcpu              780 include/linux/kvm_host.h void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
vcpu              782 include/linux/kvm_host.h void kvm_sigset_activate(struct kvm_vcpu *vcpu);
vcpu              783 include/linux/kvm_host.h void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
vcpu              785 include/linux/kvm_host.h void kvm_vcpu_block(struct kvm_vcpu *vcpu);
vcpu              786 include/linux/kvm_host.h void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
vcpu              787 include/linux/kvm_host.h void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
vcpu              788 include/linux/kvm_host.h bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
vcpu              789 include/linux/kvm_host.h void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
vcpu              791 include/linux/kvm_host.h void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
vcpu              804 include/linux/kvm_host.h vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
vcpu              833 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
vcpu              834 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
vcpu              836 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
vcpu              839 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
vcpu              840 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
vcpu              841 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
vcpu              843 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
vcpu              845 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu              847 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu              849 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
vcpu              851 include/linux/kvm_host.h int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
vcpu              856 include/linux/kvm_host.h int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
vcpu              857 include/linux/kvm_host.h void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
vcpu              859 include/linux/kvm_host.h void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
vcpu              861 include/linux/kvm_host.h void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
vcpu              862 include/linux/kvm_host.h void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
vcpu              863 include/linux/kvm_host.h void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
vcpu              865 include/linux/kvm_host.h int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
vcpu              866 include/linux/kvm_host.h void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
vcpu              867 include/linux/kvm_host.h void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
vcpu              870 include/linux/kvm_host.h void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
vcpu              878 include/linux/kvm_host.h int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
vcpu              879 include/linux/kvm_host.h bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
vcpu              880 include/linux/kvm_host.h int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
vcpu              881 include/linux/kvm_host.h bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
vcpu              943 include/linux/kvm_host.h static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
vcpu              946 include/linux/kvm_host.h 	return vcpu->arch.wqp;
vcpu              948 include/linux/kvm_host.h 	return &vcpu->wq;
vcpu              970 include/linux/kvm_host.h int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
vcpu              971 include/linux/kvm_host.h void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
vcpu             1079 include/linux/kvm_host.h static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
vcpu             1082 include/linux/kvm_host.h 	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
vcpu             1200 include/linux/kvm_host.h static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
vcpu             1207 include/linux/kvm_host.h 	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
vcpu             1210 include/linux/kvm_host.h static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
vcpu             1212 include/linux/kvm_host.h 	return READ_ONCE(vcpu->requests);
vcpu             1215 include/linux/kvm_host.h static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
vcpu             1217 include/linux/kvm_host.h 	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
vcpu             1220 include/linux/kvm_host.h static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
vcpu             1222 include/linux/kvm_host.h 	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
vcpu             1225 include/linux/kvm_host.h static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
vcpu             1227 include/linux/kvm_host.h 	if (kvm_test_request(req, vcpu)) {
vcpu             1228 include/linux/kvm_host.h 		kvm_clear_request(req, vcpu);
vcpu             1311 include/linux/kvm_host.h static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
vcpu             1313 include/linux/kvm_host.h 	vcpu->spin_loop.in_spin_loop = val;
vcpu             1315 include/linux/kvm_host.h static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
vcpu             1317 include/linux/kvm_host.h 	vcpu->spin_loop.dy_eligible = val;
vcpu             1322 include/linux/kvm_host.h static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
vcpu             1326 include/linux/kvm_host.h static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
vcpu             1345 include/linux/kvm_host.h static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
vcpu             1347 include/linux/kvm_host.h 	return vcpu->valid_wakeup;
vcpu             1351 include/linux/kvm_host.h static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
vcpu             1359 include/linux/kvm_host.h bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
vcpu             1361 include/linux/kvm_host.h static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
vcpu             1383 include/linux/kvm_host.h int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
vcpu             1385 include/linux/kvm_host.h static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
vcpu             1030 include/uapi/linux/kvm.h 	__u32 vcpu;
vcpu               64 include/xen/interface/event_channel.h 	uint32_t vcpu;
vcpu               93 include/xen/interface/event_channel.h 	uint32_t vcpu;
vcpu              140 include/xen/interface/event_channel.h 	uint32_t vcpu;		   /* VCPU to which this channel is bound.   */
vcpu              168 include/xen/interface/event_channel.h 	uint32_t vcpu;
vcpu              202 include/xen/interface/event_channel.h 	uint32_t vcpu;
vcpu               39 include/xen/interface/xenpmu.h 	uint32_t vcpu;
vcpu               34 include/xen/xen-ops.h bool xen_vcpu_stolen(int vcpu);
vcpu             1030 tools/include/uapi/linux/kvm.h 	__u32 vcpu;
vcpu              172 tools/perf/builtin-kvm.c 				event->vcpu[j].time = 0;
vcpu              173 tools/perf/builtin-kvm.c 				init_stats(&event->vcpu[j].stats);
vcpu              196 tools/perf/builtin-kvm.c 	prev = event->vcpu;
vcpu              197 tools/perf/builtin-kvm.c 	event->vcpu = realloc(event->vcpu,
vcpu              198 tools/perf/builtin-kvm.c 			      event->max_vcpu * sizeof(*event->vcpu));
vcpu              199 tools/perf/builtin-kvm.c 	if (!event->vcpu) {
vcpu              205 tools/perf/builtin-kvm.c 	memset(event->vcpu + old_max_vcpu, 0,
vcpu              206 tools/perf/builtin-kvm.c 	       (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
vcpu              273 tools/perf/builtin-kvm.c 		kvm_stats = &event->vcpu[vcpu_id];
vcpu              290 tools/perf/builtin-kvm.c 	kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
vcpu              349 tools/perf/builtin-kvm.c 	int vcpu;
vcpu              352 tools/perf/builtin-kvm.c 		vcpu = -1;
vcpu              354 tools/perf/builtin-kvm.c 		vcpu = vcpu_record->vcpu_id;
vcpu              400 tools/perf/builtin-kvm.c 	return update_kvm_event(event, vcpu, time_diff);
vcpu              457 tools/perf/builtin-kvm.c static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
vcpu              459 tools/perf/builtin-kvm.c 	if (vcpu == -1)							\
vcpu              462 tools/perf/builtin-kvm.c 	if (vcpu >= event->max_vcpu)					\
vcpu              465 tools/perf/builtin-kvm.c 	return event->vcpu[vcpu].field;					\
vcpu              471 tools/perf/builtin-kvm.c 					struct kvm_event *two, int vcpu)\
vcpu              473 tools/perf/builtin-kvm.c 	return get_event_ ##func(one, vcpu) >				\
vcpu              474 tools/perf/builtin-kvm.c 				get_event_ ##func(two, vcpu);		\
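The builtin-kvm.c fragments above come from token-pasting macros: a single macro expansion emits both a per-field accessor get_event_<field>() (falling back to the aggregate when vcpu == -1) and a matching "bigger than" comparator. The stand-alone sketch below shows that generation pattern; the struct layout, macro name and field set are reduced assumptions for illustration, not the perf tool's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_event_record { uint64_t time; uint64_t count; };

struct kvm_event {
	struct vcpu_event_record total;		/* aggregate across all vcpus */
	struct vcpu_event_record vcpu[4];	/* per-vcpu stats, fixed size here */
	int max_vcpu;
};

#define GET_EVENT_KEY(func, field)					\
static uint64_t get_event_##func(struct kvm_event *event, int vcpu)	\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
	return event->vcpu[vcpu].field;					\
}									\
									\
static bool cmp_event_##func(struct kvm_event *one,			\
			     struct kvm_event *two, int vcpu)		\
{									\
	return get_event_##func(one, vcpu) >				\
	       get_event_##func(two, vcpu);				\
}

GET_EVENT_KEY(time, time)	/* emits get_event_time() and cmp_event_time() */
GET_EVENT_KEY(count, count)	/* emits get_event_count() and cmp_event_count() */

int main(void)
{
	struct kvm_event a = { .total = { .time = 10, .count = 3 }, .max_vcpu = 4 };
	struct kvm_event b = { .total = { .time = 7,  .count = 9 }, .max_vcpu = 4 };

	printf("a slower than b: %d\n", cmp_event_time(&a, &b, -1));
	printf("a busier than b: %d\n", cmp_event_count(&a, &b, -1));
	return 0;
}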
vcpu              508 tools/perf/builtin-kvm.c 			     key_cmp_fun bigger, int vcpu)
vcpu              518 tools/perf/builtin-kvm.c 		if (bigger(event, p, vcpu))
vcpu              531 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
vcpu              533 tools/perf/builtin-kvm.c 	kvm->total_count += get_event_count(event, vcpu);
vcpu              534 tools/perf/builtin-kvm.c 	kvm->total_time += get_event_time(event, vcpu);
vcpu              537 tools/perf/builtin-kvm.c static bool event_is_valid(struct kvm_event *event, int vcpu)
vcpu              539 tools/perf/builtin-kvm.c 	return !!get_event_count(event, vcpu);
vcpu              545 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
vcpu              550 tools/perf/builtin-kvm.c 			if (event_is_valid(event, vcpu)) {
vcpu              553 tools/perf/builtin-kvm.c 						 kvm->compare, vcpu);
vcpu              573 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
vcpu              584 tools/perf/builtin-kvm.c 	if (vcpu == -1)
vcpu              587 tools/perf/builtin-kvm.c 		pr_info("VCPU %d:\n\n", vcpu);
vcpu              610 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
vcpu              632 tools/perf/builtin-kvm.c 		ecount = get_event_count(event, vcpu);
vcpu              633 tools/perf/builtin-kvm.c 		etime = get_event_time(event, vcpu);
vcpu              634 tools/perf/builtin-kvm.c 		max = get_event_max(event, vcpu);
vcpu              635 tools/perf/builtin-kvm.c 		min = get_event_min(event, vcpu);
vcpu              645 tools/perf/builtin-kvm.c 			kvm_event_rel_stddev(vcpu, event));
vcpu              734 tools/perf/builtin-kvm.c static bool verify_vcpu(int vcpu)
vcpu              736 tools/perf/builtin-kvm.c 	if (vcpu != -1 && vcpu < 0) {
vcpu              737 tools/perf/builtin-kvm.c 		pr_err("Invalid vcpu:%d.\n", vcpu);
vcpu             1140 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
vcpu             1145 tools/perf/builtin-kvm.c 	if (!verify_vcpu(vcpu))
vcpu               37 tools/perf/util/kvm-stat.h 	struct kvm_event_stats *vcpu;
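The tools/perf/builtin-kvm.c hits above grow the per-vCPU stats array on demand: realloc to the new maximum, then zero only the newly added tail so existing per-vCPU accumulators survive. A minimal sketch of that pattern follows, using hypothetical names (vcpu_stats, ensure_vcpu_slot) rather than perf's real types.

#include <stdlib.h>
#include <string.h>

/* Hypothetical per-vCPU accumulator; perf's kvm_event_stats carries more fields. */
struct vcpu_stats {
	unsigned long count;
	unsigned long long time_ns;
};

struct event {
	struct vcpu_stats *vcpu;   /* array indexed by vcpu_id */
	int max_vcpu;              /* current array length */
};

/* Grow event->vcpu so that vcpu_id is a valid index, zeroing only the new
 * tail, mirroring the realloc/memset shape in builtin-kvm.c. */
static int ensure_vcpu_slot(struct event *event, int vcpu_id)
{
	int old_max = event->max_vcpu;
	struct vcpu_stats *prev;

	if (vcpu_id < old_max)
		return 0;

	event->max_vcpu = vcpu_id + 1;
	prev = event->vcpu;
	event->vcpu = realloc(event->vcpu,
			      event->max_vcpu * sizeof(*event->vcpu));
	if (!event->vcpu) {
		free(prev);            /* realloc failure leaves prev allocated */
		event->max_vcpu = 0;
		return -1;
	}
	memset(event->vcpu + old_max, 0,
	       (event->max_vcpu - old_max) * sizeof(*event->vcpu));
	return 0;
}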
vcpu              381 tools/testing/selftests/kvm/lib/kvm_util.c struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
vcpu              383 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpup;
vcpu              408 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu              411 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = munmap(vcpu->state, sizeof(*vcpu->state));
vcpu              414 tools/testing/selftests/kvm/lib/kvm_util.c 	close(vcpu->fd);
vcpu              418 tools/testing/selftests/kvm/lib/kvm_util.c 	if (vcpu->next)
vcpu              419 tools/testing/selftests/kvm/lib/kvm_util.c 		vcpu->next->prev = vcpu->prev;
vcpu              420 tools/testing/selftests/kvm/lib/kvm_util.c 	if (vcpu->prev)
vcpu              421 tools/testing/selftests/kvm/lib/kvm_util.c 		vcpu->prev->next = vcpu->next;
vcpu              423 tools/testing/selftests/kvm/lib/kvm_util.c 		vm->vcpu_head = vcpu->next;
vcpu              424 tools/testing/selftests/kvm/lib/kvm_util.c 	free(vcpu);
vcpu              806 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu;
vcpu              809 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu = vcpu_find(vm, vcpuid);
vcpu              810 tools/testing/selftests/kvm/lib/kvm_util.c 	if (vcpu != NULL)
vcpu              815 tools/testing/selftests/kvm/lib/kvm_util.c 			vcpuid, vcpu->id, vcpu->state);
vcpu              818 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu = calloc(1, sizeof(*vcpu));
vcpu              819 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
vcpu              820 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->id = vcpuid;
vcpu              821 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
vcpu              822 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
vcpu              823 tools/testing/selftests/kvm/lib/kvm_util.c 		vcpu->fd, errno);
vcpu              825 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
vcpu              827 tools/testing/selftests/kvm/lib/kvm_util.c 		vcpu_mmap_sz(), sizeof(*vcpu->state));
vcpu              828 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
vcpu              829 tools/testing/selftests/kvm/lib/kvm_util.c 		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
vcpu              830 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
vcpu              835 tools/testing/selftests/kvm/lib/kvm_util.c 		vm->vcpu_head->prev = vcpu;
vcpu              836 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->next = vm->vcpu_head;
vcpu              837 tools/testing/selftests/kvm/lib/kvm_util.c 	vm->vcpu_head = vcpu;
vcpu             1119 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1120 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1122 tools/testing/selftests/kvm/lib/kvm_util.c 	return vcpu->state;
vcpu             1148 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1151 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1153 tools/testing/selftests/kvm/lib/kvm_util.c 		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
vcpu             1160 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1163 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1165 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->state->immediate_exit = 1;
vcpu             1166 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
vcpu             1167 tools/testing/selftests/kvm/lib/kvm_util.c 	vcpu->state->immediate_exit = 0;
vcpu             1192 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1195 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1197 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
vcpu             1219 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1222 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1224 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
vcpu             1246 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1249 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1251 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
vcpu             1260 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1263 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1265 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
vcpu             1273 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1276 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1278 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
vcpu             1288 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1291 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1293 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
vcpu             1302 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1305 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1307 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
vcpu             1335 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1338 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1340 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
vcpu             1369 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1371 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1373 tools/testing/selftests/kvm/lib/kvm_util.c 	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
vcpu             1402 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1405 tools/testing/selftests/kvm/lib/kvm_util.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu             1407 tools/testing/selftests/kvm/lib/kvm_util.c 	ret = ioctl(vcpu->fd, cmd, arg);
vcpu             1451 tools/testing/selftests/kvm/lib/kvm_util.c 	struct vcpu *vcpu;
vcpu             1477 tools/testing/selftests/kvm/lib/kvm_util.c 	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
vcpu             1478 tools/testing/selftests/kvm/lib/kvm_util.c 		vcpu_dump(stream, vm, vcpu->id, indent + 2);
vcpu               38 tools/testing/selftests/kvm/lib/kvm_util_internal.h 	struct vcpu *next, *prev;
vcpu               55 tools/testing/selftests/kvm/lib/kvm_util_internal.h 	struct vcpu *vcpu_head;
vcpu               66 tools/testing/selftests/kvm/lib/kvm_util_internal.h struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
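The selftest library keeps each VM's vCPUs on a doubly linked list headed at vm->vcpu_head: vm_vcpu_add() pushes at the head and vm_vcpu_rm() unlinks from either end or the middle. A minimal, self-contained sketch of that head-insert/unlink pattern, with hypothetical names rather than the library's real structures:

#include <stdlib.h>

struct node {
	int id;
	struct node *next, *prev;
};

/* Push a new node at the list head, as vm_vcpu_add() does for vm->vcpu_head. */
static struct node *push_head(struct node **head, int id)
{
	struct node *n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;
	n->id = id;
	if (*head)
		(*head)->prev = n;
	n->next = *head;
	*head = n;
	return n;
}

/* Unlink a node from anywhere in the list, as vm_vcpu_rm() does. */
static void unlink_node(struct node **head, struct node *n)
{
	if (n->next)
		n->next->prev = n->prev;
	if (n->prev)
		n->prev->next = n->next;
	else
		*head = n->next;
	free(n);
}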
vcpu              274 tools/testing/selftests/kvm/lib/s390x/processor.c 	struct vcpu *vcpu = vm->vcpu_head;
vcpu              277 tools/testing/selftests/kvm/lib/s390x/processor.c 		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
vcpu              785 tools/testing/selftests/kvm/lib/x86_64/processor.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu              788 tools/testing/selftests/kvm/lib/x86_64/processor.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu              790 tools/testing/selftests/kvm/lib/x86_64/processor.c 	rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
vcpu              855 tools/testing/selftests/kvm/lib/x86_64/processor.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu              862 tools/testing/selftests/kvm/lib/x86_64/processor.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu              865 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
vcpu              889 tools/testing/selftests/kvm/lib/x86_64/processor.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu              896 tools/testing/selftests/kvm/lib/x86_64/processor.c 	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
vcpu              901 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
vcpu             1018 tools/testing/selftests/kvm/lib/x86_64/processor.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1047 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
vcpu             1051 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
vcpu             1055 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
vcpu             1059 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
vcpu             1064 tools/testing/selftests/kvm/lib/x86_64/processor.c 		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
vcpu             1069 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
vcpu             1075 tools/testing/selftests/kvm/lib/x86_64/processor.c 		r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
vcpu             1087 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
vcpu             1091 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
vcpu             1101 tools/testing/selftests/kvm/lib/x86_64/processor.c 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
vcpu             1104 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
vcpu             1109 tools/testing/selftests/kvm/lib/x86_64/processor.c 		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
vcpu             1114 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
vcpu             1118 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
vcpu             1122 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
vcpu             1126 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
vcpu             1130 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
vcpu             1134 tools/testing/selftests/kvm/lib/x86_64/processor.c 	r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
vcpu             1139 tools/testing/selftests/kvm/lib/x86_64/processor.c 		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
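All of the selftest wrappers above ultimately issue raw ioctls against a per-vCPU fd returned by KVM_CREATE_VCPU, with the shared kvm_run block mmapped from that fd. A minimal, self-contained userspace sketch of that sequence (error handling abbreviated); it only creates and maps the vCPU and reads its registers, it does not set up guest memory or run it.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0 || ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return 1;

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);        /* VM fd */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);     /* vCPU fd, id 0 */
	if (vm < 0 || vcpu < 0)
		return 1;

	/* The kvm_run state block is mmapped from the vCPU fd; its size comes
	 * from the system fd, as vcpu_mmap_sz() does in the selftest library. */
	long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu, 0);
	if (run == MAP_FAILED)
		return 1;

	struct kvm_regs regs;                         /* x86_64 register layout */
	if (ioctl(vcpu, KVM_GET_REGS, &regs) == 0)
		printf("initial rip: 0x%llx\n", regs.rip);
	return 0;
}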
vcpu               36 virt/kvm/arm/aarch32.c static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
vcpu               39 virt/kvm/arm/aarch32.c 	if (kvm_arm_vcpu_loaded(vcpu)) {
vcpu               40 virt/kvm/arm/aarch32.c 		kvm_arch_vcpu_put(vcpu);
vcpu               48 virt/kvm/arm/aarch32.c static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
vcpu               51 virt/kvm/arm/aarch32.c 		kvm_arch_vcpu_load(vcpu, smp_processor_id());
vcpu               74 virt/kvm/arm/aarch32.c static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
vcpu               76 virt/kvm/arm/aarch32.c 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
vcpu               79 virt/kvm/arm/aarch32.c 	old = *vcpu_cpsr(vcpu);
vcpu              154 virt/kvm/arm/aarch32.c static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
vcpu              156 virt/kvm/arm/aarch32.c 	unsigned long spsr = *vcpu_cpsr(vcpu);
vcpu              159 virt/kvm/arm/aarch32.c 	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
vcpu              161 virt/kvm/arm/aarch32.c 	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
vcpu              164 virt/kvm/arm/aarch32.c 	vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
vcpu              165 virt/kvm/arm/aarch32.c 	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
vcpu              171 virt/kvm/arm/aarch32.c 		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
vcpu              173 virt/kvm/arm/aarch32.c 	*vcpu_pc(vcpu) = vect_offset;
vcpu              176 virt/kvm/arm/aarch32.c void kvm_inject_undef32(struct kvm_vcpu *vcpu)
vcpu              178 virt/kvm/arm/aarch32.c 	bool loaded = pre_fault_synchronize(vcpu);
vcpu              180 virt/kvm/arm/aarch32.c 	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
vcpu              181 virt/kvm/arm/aarch32.c 	post_fault_synchronize(vcpu, loaded);
vcpu              188 virt/kvm/arm/aarch32.c static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
vcpu              196 virt/kvm/arm/aarch32.c 	loaded = pre_fault_synchronize(vcpu);
vcpu              200 virt/kvm/arm/aarch32.c 		far = &vcpu_cp15(vcpu, c6_IFAR);
vcpu              201 virt/kvm/arm/aarch32.c 		fsr = &vcpu_cp15(vcpu, c5_IFSR);
vcpu              204 virt/kvm/arm/aarch32.c 		far = &vcpu_cp15(vcpu, c6_DFAR);
vcpu              205 virt/kvm/arm/aarch32.c 		fsr = &vcpu_cp15(vcpu, c5_DFSR);
vcpu              208 virt/kvm/arm/aarch32.c 	prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);
vcpu              213 virt/kvm/arm/aarch32.c 	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
vcpu              221 virt/kvm/arm/aarch32.c 	post_fault_synchronize(vcpu, loaded);
vcpu              224 virt/kvm/arm/aarch32.c void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu              226 virt/kvm/arm/aarch32.c 	inject_abt32(vcpu, false, addr);
vcpu              229 virt/kvm/arm/aarch32.c void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
vcpu              231 virt/kvm/arm/aarch32.c 	inject_abt32(vcpu, true, addr);
vcpu               43 virt/kvm/arm/arch_timer.c static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
vcpu               46 virt/kvm/arm/arch_timer.c static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
vcpu               50 virt/kvm/arm/arch_timer.c static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
vcpu               59 virt/kvm/arm/arch_timer.c static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
vcpu               62 virt/kvm/arm/arch_timer.c 		map->direct_vtimer = vcpu_vtimer(vcpu);
vcpu               63 virt/kvm/arm/arch_timer.c 		map->direct_ptimer = vcpu_ptimer(vcpu);
vcpu               66 virt/kvm/arm/arch_timer.c 		map->direct_vtimer = vcpu_vtimer(vcpu);
vcpu               68 virt/kvm/arm/arch_timer.c 		map->emul_ptimer = vcpu_ptimer(vcpu);
vcpu               71 virt/kvm/arm/arch_timer.c 	trace_kvm_get_timer_map(vcpu->vcpu_id, map);
vcpu               93 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
vcpu              103 virt/kvm/arm/arch_timer.c 	if (!vcpu)
vcpu              106 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              114 virt/kvm/arm/arch_timer.c 		kvm_timer_update_irq(vcpu, true, ctx);
vcpu              116 virt/kvm/arm/arch_timer.c 	if (userspace_irqchip(vcpu->kvm) &&
vcpu              155 virt/kvm/arm/arch_timer.c static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
vcpu              161 virt/kvm/arm/arch_timer.c 		struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
vcpu              178 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu;
vcpu              182 virt/kvm/arm/arch_timer.c 	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
vcpu              189 virt/kvm/arm/arch_timer.c 	ns = kvm_timer_earliest_exp(vcpu);
vcpu              195 virt/kvm/arm/arch_timer.c 	kvm_vcpu_wake_up(vcpu);
vcpu              202 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu;
vcpu              206 virt/kvm/arm/arch_timer.c 	vcpu = ctx->vcpu;
vcpu              221 virt/kvm/arm/arch_timer.c 	kvm_timer_update_irq(vcpu, true, ctx);
vcpu              265 virt/kvm/arm/arch_timer.c bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
vcpu              269 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              279 virt/kvm/arm/arch_timer.c void kvm_timer_update_run(struct kvm_vcpu *vcpu)
vcpu              281 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu              282 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
vcpu              283 virt/kvm/arm/arch_timer.c 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
vcpu              294 virt/kvm/arm/arch_timer.c static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
vcpu              300 virt/kvm/arm/arch_timer.c 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
vcpu              303 virt/kvm/arm/arch_timer.c 	if (!userspace_irqchip(vcpu->kvm)) {
vcpu              304 virt/kvm/arm/arch_timer.c 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
vcpu              320 virt/kvm/arm/arch_timer.c 		kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
vcpu              339 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
vcpu              386 virt/kvm/arm/arch_timer.c static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
vcpu              388 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              391 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              406 virt/kvm/arm/arch_timer.c 	soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
vcpu              409 virt/kvm/arm/arch_timer.c static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
vcpu              411 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              418 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
vcpu              476 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu = ctx->vcpu;
vcpu              485 virt/kvm/arm/arch_timer.c 	kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
vcpu              487 virt/kvm/arm/arch_timer.c 	if (irqchip_in_kernel(vcpu->kvm))
vcpu              488 virt/kvm/arm/arch_timer.c 		phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
vcpu              495 virt/kvm/arm/arch_timer.c static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
vcpu              497 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu              505 virt/kvm/arm/arch_timer.c 	kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
vcpu              523 virt/kvm/arm/arch_timer.c void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
vcpu              525 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              531 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              538 virt/kvm/arm/arch_timer.c 		kvm_timer_vcpu_load_nogic(vcpu);
vcpu              543 virt/kvm/arm/arch_timer.c 	kvm_timer_unblocking(vcpu);
vcpu              553 virt/kvm/arm/arch_timer.c bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
vcpu              555 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu              556 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
vcpu              557 virt/kvm/arm/arch_timer.c 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
vcpu              560 virt/kvm/arm/arch_timer.c 	if (likely(irqchip_in_kernel(vcpu->kvm)))
vcpu              570 virt/kvm/arm/arch_timer.c void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
vcpu              572 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              578 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              596 virt/kvm/arm/arch_timer.c 	if (swait_active(kvm_arch_vcpu_wq(vcpu)))
vcpu              597 virt/kvm/arm/arch_timer.c 		kvm_timer_blocking(vcpu);
vcpu              614 virt/kvm/arm/arch_timer.c static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
vcpu              616 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu              619 virt/kvm/arm/arch_timer.c 		kvm_timer_update_irq(vcpu, false, vtimer);
vcpu              627 virt/kvm/arm/arch_timer.c void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
vcpu              629 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              634 virt/kvm/arm/arch_timer.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
vcpu              635 virt/kvm/arm/arch_timer.c 		unmask_vtimer_irq_user(vcpu);
vcpu              638 virt/kvm/arm/arch_timer.c int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
vcpu              640 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              643 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu              651 virt/kvm/arm/arch_timer.c 	vcpu_vtimer(vcpu)->cnt_ctl = 0;
vcpu              652 virt/kvm/arm/arch_timer.c 	vcpu_ptimer(vcpu)->cnt_ctl = 0;
vcpu              655 virt/kvm/arm/arch_timer.c 		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
vcpu              656 virt/kvm/arm/arch_timer.c 		kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
vcpu              658 virt/kvm/arm/arch_timer.c 		if (irqchip_in_kernel(vcpu->kvm)) {
vcpu              659 virt/kvm/arm/arch_timer.c 			kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
vcpu              661 virt/kvm/arm/arch_timer.c 				kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
vcpu              672 virt/kvm/arm/arch_timer.c static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
vcpu              675 virt/kvm/arm/arch_timer.c 	struct kvm *kvm = vcpu->kvm;
vcpu              686 virt/kvm/arm/arch_timer.c 	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
vcpu              690 virt/kvm/arm/arch_timer.c void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              692 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu              693 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu              694 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
vcpu              697 virt/kvm/arm/arch_timer.c 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
vcpu              717 virt/kvm/arm/arch_timer.c 	vtimer->vcpu = vcpu;
vcpu              718 virt/kvm/arm/arch_timer.c 	ptimer->vcpu = vcpu;
vcpu              727 virt/kvm/arm/arch_timer.c int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
vcpu              733 virt/kvm/arm/arch_timer.c 		timer = vcpu_vtimer(vcpu);
vcpu              734 virt/kvm/arm/arch_timer.c 		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
vcpu              737 virt/kvm/arm/arch_timer.c 		timer = vcpu_vtimer(vcpu);
vcpu              738 virt/kvm/arm/arch_timer.c 		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
vcpu              741 virt/kvm/arm/arch_timer.c 		timer = vcpu_vtimer(vcpu);
vcpu              742 virt/kvm/arm/arch_timer.c 		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
vcpu              745 virt/kvm/arm/arch_timer.c 		timer = vcpu_ptimer(vcpu);
vcpu              746 virt/kvm/arm/arch_timer.c 		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
vcpu              749 virt/kvm/arm/arch_timer.c 		timer = vcpu_ptimer(vcpu);
vcpu              750 virt/kvm/arm/arch_timer.c 		kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
vcpu              774 virt/kvm/arm/arch_timer.c u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
vcpu              778 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              779 virt/kvm/arm/arch_timer.c 					  vcpu_vtimer(vcpu), TIMER_REG_CTL);
vcpu              781 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              782 virt/kvm/arm/arch_timer.c 					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
vcpu              784 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              785 virt/kvm/arm/arch_timer.c 					  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
vcpu              787 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              788 virt/kvm/arm/arch_timer.c 					  vcpu_ptimer(vcpu), TIMER_REG_CTL);
vcpu              790 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              791 virt/kvm/arm/arch_timer.c 					  vcpu_vtimer(vcpu), TIMER_REG_CNT);
vcpu              793 virt/kvm/arm/arch_timer.c 		return kvm_arm_timer_read(vcpu,
vcpu              794 virt/kvm/arm/arch_timer.c 					  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
vcpu              799 virt/kvm/arm/arch_timer.c static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
vcpu              830 virt/kvm/arm/arch_timer.c u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
vcpu              837 virt/kvm/arm/arch_timer.c 	kvm_timer_vcpu_put(vcpu);
vcpu              839 virt/kvm/arm/arch_timer.c 	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
vcpu              841 virt/kvm/arm/arch_timer.c 	kvm_timer_vcpu_load(vcpu);
vcpu              847 virt/kvm/arm/arch_timer.c static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
vcpu              870 virt/kvm/arm/arch_timer.c void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
vcpu              876 virt/kvm/arm/arch_timer.c 	kvm_timer_vcpu_put(vcpu);
vcpu              878 virt/kvm/arm/arch_timer.c 	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
vcpu              880 virt/kvm/arm/arch_timer.c 	kvm_timer_vcpu_load(vcpu);
vcpu              993 virt/kvm/arm/arch_timer.c void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
vcpu              995 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu             1000 virt/kvm/arm/arch_timer.c static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
vcpu             1005 virt/kvm/arm/arch_timer.c 	vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
vcpu             1006 virt/kvm/arm/arch_timer.c 	ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
vcpu             1010 virt/kvm/arm/arch_timer.c 	ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
vcpu             1011 virt/kvm/arm/arch_timer.c 	ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
vcpu             1015 virt/kvm/arm/arch_timer.c 	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
vcpu             1016 virt/kvm/arm/arch_timer.c 		if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
vcpu             1017 virt/kvm/arm/arch_timer.c 		    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
vcpu             1026 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
vcpu             1029 virt/kvm/arm/arch_timer.c 	if (vintid == vcpu_vtimer(vcpu)->irq.irq)
vcpu             1030 virt/kvm/arm/arch_timer.c 		timer = vcpu_vtimer(vcpu);
vcpu             1031 virt/kvm/arm/arch_timer.c 	else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
vcpu             1032 virt/kvm/arm/arch_timer.c 		timer = vcpu_ptimer(vcpu);
vcpu             1039 virt/kvm/arm/arch_timer.c int kvm_timer_enable(struct kvm_vcpu *vcpu)
vcpu             1041 virt/kvm/arm/arch_timer.c 	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
vcpu             1049 virt/kvm/arm/arch_timer.c 	if (!irqchip_in_kernel(vcpu->kvm))
vcpu             1052 virt/kvm/arm/arch_timer.c 	if (!vgic_initialized(vcpu->kvm))
vcpu             1055 virt/kvm/arm/arch_timer.c 	if (!timer_irqs_are_valid(vcpu)) {
vcpu             1060 virt/kvm/arm/arch_timer.c 	get_timer_map(vcpu, &map);
vcpu             1062 virt/kvm/arm/arch_timer.c 	ret = kvm_vgic_map_phys_irq(vcpu,
vcpu             1070 virt/kvm/arm/arch_timer.c 		ret = kvm_vgic_map_phys_irq(vcpu,
vcpu             1108 virt/kvm/arm/arch_timer.c 	struct kvm_vcpu *vcpu;
vcpu             1111 virt/kvm/arm/arch_timer.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1112 virt/kvm/arm/arch_timer.c 		vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
vcpu             1113 virt/kvm/arm/arch_timer.c 		vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
vcpu             1117 virt/kvm/arm/arch_timer.c int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
vcpu             1120 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
vcpu             1121 virt/kvm/arm/arch_timer.c 	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
vcpu             1124 virt/kvm/arm/arch_timer.c 	if (!irqchip_in_kernel(vcpu->kvm))
vcpu             1133 virt/kvm/arm/arch_timer.c 	if (vcpu->arch.timer_cpu.enabled)
vcpu             1138 virt/kvm/arm/arch_timer.c 		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
vcpu             1141 virt/kvm/arm/arch_timer.c 		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
vcpu             1150 virt/kvm/arm/arch_timer.c int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
vcpu             1158 virt/kvm/arm/arch_timer.c 		timer = vcpu_vtimer(vcpu);
vcpu             1161 virt/kvm/arm/arch_timer.c 		timer = vcpu_ptimer(vcpu);
vcpu             1171 virt/kvm/arm/arch_timer.c int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
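kvm_timer_earliest_exp() in the arch_timer.c hits above scans every timer context and keeps the smallest time-to-expiry so the background hrtimer can be armed for the first deadline before the vCPU blocks. A minimal sketch of that min-scan, using hypothetical types; the real helper also skips contexts whose interrupt is masked or whose timer is disabled.

#include <stdint.h>

#define NR_TIMERS 2

/* Hypothetical stand-in for arch_timer_context: a deadline and an enable bit. */
struct timer_ctx {
	int enabled;
	uint64_t expires_ns;     /* absolute deadline */
};

/* Return ns from 'now' until the earliest enabled timer fires, or 0 if no
 * timer is pending, mirroring the shape of kvm_timer_earliest_exp(). */
static uint64_t earliest_exp(const struct timer_ctx *t, uint64_t now)
{
	uint64_t min_delta = UINT64_MAX;

	for (int i = 0; i < NR_TIMERS; i++) {
		if (!t[i].enabled || t[i].expires_ns <= now)
			continue;
		uint64_t delta = t[i].expires_ns - now;
		if (delta < min_delta)
			min_delta = delta;
	}
	return min_delta == UINT64_MAX ? 0 : min_delta;
}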
vcpu               62 virt/kvm/arm/arm.c static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
vcpu               64 virt/kvm/arm/arm.c 	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
vcpu               86 virt/kvm/arm/arm.c int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
vcpu               88 virt/kvm/arm/arm.c 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
vcpu              147 virt/kvm/arm/arm.c int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
vcpu              152 virt/kvm/arm/arm.c vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
vcpu              259 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu;
vcpu              271 virt/kvm/arm/arm.c 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
vcpu              272 virt/kvm/arm/arm.c 	if (!vcpu) {
vcpu              277 virt/kvm/arm/arm.c 	err = kvm_vcpu_init(vcpu, kvm, id);
vcpu              281 virt/kvm/arm/arm.c 	err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
vcpu              285 virt/kvm/arm/arm.c 	return vcpu;
vcpu              287 virt/kvm/arm/arm.c 	kvm_vcpu_uninit(vcpu);
vcpu              289 virt/kvm/arm/arm.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu              294 virt/kvm/arm/arm.c void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu              298 virt/kvm/arm/arm.c void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
vcpu              300 virt/kvm/arm/arm.c 	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
vcpu              303 virt/kvm/arm/arm.c 	kvm_mmu_free_memory_caches(vcpu);
vcpu              304 virt/kvm/arm/arm.c 	kvm_timer_vcpu_terminate(vcpu);
vcpu              305 virt/kvm/arm/arm.c 	kvm_pmu_vcpu_destroy(vcpu);
vcpu              306 virt/kvm/arm/arm.c 	kvm_vcpu_uninit(vcpu);
vcpu              307 virt/kvm/arm/arm.c 	kmem_cache_free(kvm_vcpu_cache, vcpu);
vcpu              310 virt/kvm/arm/arm.c void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu              312 virt/kvm/arm/arm.c 	kvm_arch_vcpu_free(vcpu);
vcpu              315 virt/kvm/arm/arm.c int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
vcpu              317 virt/kvm/arm/arm.c 	return kvm_timer_is_pending(vcpu);
vcpu              320 virt/kvm/arm/arm.c void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
vcpu              330 virt/kvm/arm/arm.c 	kvm_vgic_vmcr_sync(vcpu);
vcpu              333 virt/kvm/arm/arm.c 	kvm_vgic_v4_enable_doorbell(vcpu);
vcpu              336 virt/kvm/arm/arm.c void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
vcpu              338 virt/kvm/arm/arm.c 	kvm_vgic_v4_disable_doorbell(vcpu);
vcpu              341 virt/kvm/arm/arm.c int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              344 virt/kvm/arm/arm.c 	vcpu->arch.target = -1;
vcpu              345 virt/kvm/arm/arm.c 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu              348 virt/kvm/arm/arm.c 	kvm_timer_vcpu_init(vcpu);
vcpu              350 virt/kvm/arm/arm.c 	kvm_pmu_vcpu_init(vcpu);
vcpu              352 virt/kvm/arm/arm.c 	kvm_arm_reset_debug_ptr(vcpu);
vcpu              354 virt/kvm/arm/arm.c 	return kvm_vgic_vcpu_init(vcpu);
vcpu              367 virt/kvm/arm/arm.c void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu              372 virt/kvm/arm/arm.c 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
vcpu              379 virt/kvm/arm/arm.c 	if (*last_ran != vcpu->vcpu_id) {
vcpu              380 virt/kvm/arm/arm.c 		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
vcpu              381 virt/kvm/arm/arm.c 		*last_ran = vcpu->vcpu_id;
vcpu              384 virt/kvm/arm/arm.c 	vcpu->cpu = cpu;
vcpu              385 virt/kvm/arm/arm.c 	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
vcpu              387 virt/kvm/arm/arm.c 	kvm_arm_set_running_vcpu(vcpu);
vcpu              388 virt/kvm/arm/arm.c 	kvm_vgic_load(vcpu);
vcpu              389 virt/kvm/arm/arm.c 	kvm_timer_vcpu_load(vcpu);
vcpu              390 virt/kvm/arm/arm.c 	kvm_vcpu_load_sysregs(vcpu);
vcpu              391 virt/kvm/arm/arm.c 	kvm_arch_vcpu_load_fp(vcpu);
vcpu              392 virt/kvm/arm/arm.c 	kvm_vcpu_pmu_restore_guest(vcpu);
vcpu              395 virt/kvm/arm/arm.c 		vcpu_clear_wfe_traps(vcpu);
vcpu              397 virt/kvm/arm/arm.c 		vcpu_set_wfe_traps(vcpu);
vcpu              399 virt/kvm/arm/arm.c 	if (vcpu_has_ptrauth(vcpu)) {
vcpu              400 virt/kvm/arm/arm.c 		struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
vcpu              408 virt/kvm/arm/arm.c 		vcpu_ptrauth_disable(vcpu);
vcpu              412 virt/kvm/arm/arm.c void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu              414 virt/kvm/arm/arm.c 	kvm_arch_vcpu_put_fp(vcpu);
vcpu              415 virt/kvm/arm/arm.c 	kvm_vcpu_put_sysregs(vcpu);
vcpu              416 virt/kvm/arm/arm.c 	kvm_timer_vcpu_put(vcpu);
vcpu              417 virt/kvm/arm/arm.c 	kvm_vgic_put(vcpu);
vcpu              418 virt/kvm/arm/arm.c 	kvm_vcpu_pmu_restore_host(vcpu);
vcpu              420 virt/kvm/arm/arm.c 	vcpu->cpu = -1;
vcpu              425 virt/kvm/arm/arm.c static void vcpu_power_off(struct kvm_vcpu *vcpu)
vcpu              427 virt/kvm/arm/arm.c 	vcpu->arch.power_off = true;
vcpu              428 virt/kvm/arm/arm.c 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
vcpu              429 virt/kvm/arm/arm.c 	kvm_vcpu_kick(vcpu);
vcpu              432 virt/kvm/arm/arm.c int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
vcpu              435 virt/kvm/arm/arm.c 	if (vcpu->arch.power_off)
vcpu              443 virt/kvm/arm/arm.c int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu              450 virt/kvm/arm/arm.c 		vcpu->arch.power_off = false;
vcpu              453 virt/kvm/arm/arm.c 		vcpu_power_off(vcpu);
vcpu              476 virt/kvm/arm/arm.c bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
vcpu              478 virt/kvm/arm/arm.c 	return vcpu_mode_priv(vcpu);
vcpu              563 virt/kvm/arm/arm.c static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu              565 virt/kvm/arm/arm.c 	struct kvm *kvm = vcpu->kvm;
vcpu              568 virt/kvm/arm/arm.c 	if (likely(vcpu->arch.has_run_once))
vcpu              571 virt/kvm/arm/arm.c 	if (!kvm_arm_vcpu_is_finalized(vcpu))
vcpu              574 virt/kvm/arm/arm.c 	vcpu->arch.has_run_once = true;
vcpu              594 virt/kvm/arm/arm.c 	ret = kvm_timer_enable(vcpu);
vcpu              598 virt/kvm/arm/arm.c 	ret = kvm_arm_pmu_v3_enable(vcpu);
vcpu              611 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu;
vcpu              613 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              614 virt/kvm/arm/arm.c 		vcpu->arch.pause = true;
vcpu              621 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu;
vcpu              623 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              624 virt/kvm/arm/arm.c 		vcpu->arch.pause = false;
vcpu              625 virt/kvm/arm/arm.c 		swake_up_one(kvm_arch_vcpu_wq(vcpu));
vcpu              629 virt/kvm/arm/arm.c static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
vcpu              631 virt/kvm/arm/arm.c 	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
vcpu              633 virt/kvm/arm/arm.c 	swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
vcpu              634 virt/kvm/arm/arm.c 				       (!vcpu->arch.pause)));
vcpu              636 virt/kvm/arm/arm.c 	if (vcpu->arch.power_off || vcpu->arch.pause) {
vcpu              638 virt/kvm/arm/arm.c 		kvm_make_request(KVM_REQ_SLEEP, vcpu);
vcpu              649 virt/kvm/arm/arm.c static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
vcpu              651 virt/kvm/arm/arm.c 	return vcpu->arch.target >= 0;
vcpu              654 virt/kvm/arm/arm.c static void check_vcpu_requests(struct kvm_vcpu *vcpu)
vcpu              656 virt/kvm/arm/arm.c 	if (kvm_request_pending(vcpu)) {
vcpu              657 virt/kvm/arm/arm.c 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
vcpu              658 virt/kvm/arm/arm.c 			vcpu_req_sleep(vcpu);
vcpu              660 virt/kvm/arm/arm.c 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
vcpu              661 virt/kvm/arm/arm.c 			kvm_reset_vcpu(vcpu);
vcpu              667 virt/kvm/arm/arm.c 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              682 virt/kvm/arm/arm.c int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu              686 virt/kvm/arm/arm.c 	if (unlikely(!kvm_vcpu_initialized(vcpu)))
vcpu              689 virt/kvm/arm/arm.c 	ret = kvm_vcpu_first_run_init(vcpu);
vcpu              694 virt/kvm/arm/arm.c 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
vcpu              702 virt/kvm/arm/arm.c 	vcpu_load(vcpu);
vcpu              704 virt/kvm/arm/arm.c 	kvm_sigset_activate(vcpu);
vcpu              714 virt/kvm/arm/arm.c 		update_vmid(&vcpu->kvm->arch.vmid);
vcpu              716 virt/kvm/arm/arm.c 		check_vcpu_requests(vcpu);
vcpu              725 virt/kvm/arm/arm.c 		kvm_pmu_flush_hwstate(vcpu);
vcpu              729 virt/kvm/arm/arm.c 		kvm_vgic_flush_hwstate(vcpu);
vcpu              748 virt/kvm/arm/arm.c 			if (kvm_timer_should_notify_user(vcpu) ||
vcpu              749 virt/kvm/arm/arm.c 			    kvm_pmu_should_notify_user(vcpu)) {
vcpu              761 virt/kvm/arm/arm.c 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
vcpu              763 virt/kvm/arm/arm.c 		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
vcpu              764 virt/kvm/arm/arm.c 		    kvm_request_pending(vcpu)) {
vcpu              765 virt/kvm/arm/arm.c 			vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu              767 virt/kvm/arm/arm.c 			kvm_pmu_sync_hwstate(vcpu);
vcpu              769 virt/kvm/arm/arm.c 				kvm_timer_sync_hwstate(vcpu);
vcpu              770 virt/kvm/arm/arm.c 			kvm_vgic_sync_hwstate(vcpu);
vcpu              776 virt/kvm/arm/arm.c 		kvm_arm_setup_debug(vcpu);
vcpu              781 virt/kvm/arm/arm.c 		trace_kvm_entry(*vcpu_pc(vcpu));
vcpu              786 virt/kvm/arm/arm.c 			ret = kvm_vcpu_run_vhe(vcpu);
vcpu              789 virt/kvm/arm/arm.c 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
vcpu              792 virt/kvm/arm/arm.c 		vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu              793 virt/kvm/arm/arm.c 		vcpu->stat.exits++;
vcpu              798 virt/kvm/arm/arm.c 		kvm_arm_clear_debug(vcpu);
vcpu              805 virt/kvm/arm/arm.c 		kvm_pmu_sync_hwstate(vcpu);
vcpu              812 virt/kvm/arm/arm.c 		kvm_vgic_sync_hwstate(vcpu);
vcpu              820 virt/kvm/arm/arm.c 			kvm_timer_sync_hwstate(vcpu);
vcpu              822 virt/kvm/arm/arm.c 		kvm_arch_vcpu_ctxsync_fp(vcpu);
vcpu              845 virt/kvm/arm/arm.c 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
vcpu              848 virt/kvm/arm/arm.c 		handle_exit_early(vcpu, run, ret);
vcpu              852 virt/kvm/arm/arm.c 		ret = handle_exit(vcpu, run, ret);
vcpu              856 virt/kvm/arm/arm.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
vcpu              857 virt/kvm/arm/arm.c 		kvm_timer_update_run(vcpu);
vcpu              858 virt/kvm/arm/arm.c 		kvm_pmu_update_run(vcpu);
vcpu              861 virt/kvm/arm/arm.c 	kvm_sigset_deactivate(vcpu);
vcpu              863 virt/kvm/arm/arm.c 	vcpu_put(vcpu);
vcpu              867 virt/kvm/arm/arm.c static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
vcpu              878 virt/kvm/arm/arm.c 	hcr = vcpu_hcr(vcpu);
vcpu              895 virt/kvm/arm/arm.c 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              896 virt/kvm/arm/arm.c 	kvm_vcpu_kick(vcpu);
vcpu              907 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu = NULL;
vcpu              925 virt/kvm/arm/arm.c 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
vcpu              926 virt/kvm/arm/arm.c 		if (!vcpu)
vcpu              932 virt/kvm/arm/arm.c 		return vcpu_interrupt_line(vcpu, irq_num, level);
vcpu              940 virt/kvm/arm/arm.c 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
vcpu              941 virt/kvm/arm/arm.c 		if (!vcpu)
vcpu              947 virt/kvm/arm/arm.c 		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
vcpu              961 virt/kvm/arm/arm.c static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
vcpu              974 virt/kvm/arm/arm.c 	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
vcpu              988 virt/kvm/arm/arm.c 		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
vcpu              989 virt/kvm/arm/arm.c 		    test_bit(i, vcpu->arch.features) != set)
vcpu              993 virt/kvm/arm/arm.c 			set_bit(i, vcpu->arch.features);
vcpu              996 virt/kvm/arm/arm.c 	vcpu->arch.target = phys_target;
vcpu              999 virt/kvm/arm/arm.c 	ret = kvm_reset_vcpu(vcpu);
vcpu             1001 virt/kvm/arm/arm.c 		vcpu->arch.target = -1;
vcpu             1002 virt/kvm/arm/arm.c 		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu             1008 virt/kvm/arm/arm.c static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
vcpu             1013 virt/kvm/arm/arm.c 	ret = kvm_vcpu_set_target(vcpu, init);
vcpu             1021 virt/kvm/arm/arm.c 	if (vcpu->arch.has_run_once)
vcpu             1022 virt/kvm/arm/arm.c 		stage2_unmap_vm(vcpu->kvm);
vcpu             1024 virt/kvm/arm/arm.c 	vcpu_reset_hcr(vcpu);
vcpu             1029 virt/kvm/arm/arm.c 	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
vcpu             1030 virt/kvm/arm/arm.c 		vcpu_power_off(vcpu);
vcpu             1032 virt/kvm/arm/arm.c 		vcpu->arch.power_off = false;
vcpu             1037 virt/kvm/arm/arm.c static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
vcpu             1044 virt/kvm/arm/arm.c 		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
vcpu             1051 virt/kvm/arm/arm.c static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
vcpu             1058 virt/kvm/arm/arm.c 		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
vcpu             1065 virt/kvm/arm/arm.c static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
vcpu             1072 virt/kvm/arm/arm.c 		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
vcpu             1079 virt/kvm/arm/arm.c static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
vcpu             1084 virt/kvm/arm/arm.c 	return __kvm_arm_vcpu_get_events(vcpu, events);
vcpu             1087 virt/kvm/arm/arm.c static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
vcpu             1102 virt/kvm/arm/arm.c 	return __kvm_arm_vcpu_set_events(vcpu, events);
vcpu             1108 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             1121 virt/kvm/arm/arm.c 		r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
vcpu             1129 virt/kvm/arm/arm.c 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
vcpu             1137 virt/kvm/arm/arm.c 			r = kvm_arm_set_reg(vcpu, &reg);
vcpu             1139 virt/kvm/arm/arm.c 			r = kvm_arm_get_reg(vcpu, &reg);
vcpu             1148 virt/kvm/arm/arm.c 		if (unlikely(!kvm_vcpu_initialized(vcpu)))
vcpu             1152 virt/kvm/arm/arm.c 		if (!kvm_arm_vcpu_is_finalized(vcpu))
vcpu             1159 virt/kvm/arm/arm.c 		reg_list.n = kvm_arm_num_regs(vcpu);
vcpu             1165 virt/kvm/arm/arm.c 		r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
vcpu             1172 virt/kvm/arm/arm.c 		r = kvm_arm_vcpu_set_attr(vcpu, &attr);
vcpu             1179 virt/kvm/arm/arm.c 		r = kvm_arm_vcpu_get_attr(vcpu, &attr);
vcpu             1186 virt/kvm/arm/arm.c 		r = kvm_arm_vcpu_has_attr(vcpu, &attr);
vcpu             1192 virt/kvm/arm/arm.c 		if (kvm_arm_vcpu_get_events(vcpu, &events))
vcpu             1206 virt/kvm/arm/arm.c 		return kvm_arm_vcpu_set_events(vcpu, &events);
vcpu             1211 virt/kvm/arm/arm.c 		if (!kvm_vcpu_initialized(vcpu))
vcpu             1217 virt/kvm/arm/arm.c 		return kvm_arm_vcpu_finalize(vcpu, what);
vcpu             1631 virt/kvm/arm/arm.c 	struct kvm_vcpu *vcpu;
vcpu             1635 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             1636 virt/kvm/arm/arm.c 		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
vcpu             1637 virt/kvm/arm/arm.c 			return vcpu;
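The kvm_arch_vcpu_ioctl_run() path in the arm.c hits above is driven from userspace by calling KVM_RUN on the vCPU fd in a loop and dispatching on run->exit_reason; an MMIO read is completed by writing run->mmio.data before the next KVM_RUN, which the kernel picks up in kvm_handle_mmio_return(). A minimal sketch of that caller side, assuming vcpu_fd and the mmapped kvm_run pointer were set up as in the earlier example; the MMIO branch only echoes zeroes back, where a real VMM would plug in device emulation.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Drive one vCPU until it halts or an unhandled exit occurs.  'run' is the
 * kvm_run block mmapped from vcpu_fd. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_HLT:
			return 0;                 /* guest halted */
		case KVM_EXIT_MMIO:
			/* On a read, userspace fills run->mmio.data before the
			 * next KVM_RUN; the kernel completes the access on
			 * reentry. */
			if (!run->mmio.is_write)
				memset(run->mmio.data, 0, run->mmio.len);
			break;
		case KVM_EXIT_INTR:
			break;                    /* interrupted by a signal */
		default:
			return -1;                /* unhandled exit reason */
		}
	}
}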
vcpu               47 virt/kvm/arm/hyp/aarch32.c bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
vcpu               54 virt/kvm/arm/hyp/aarch32.c 	if (kvm_vcpu_get_hsr(vcpu) >> 30)
vcpu               58 virt/kvm/arm/hyp/aarch32.c 	cond = kvm_vcpu_get_condition(vcpu);
vcpu               62 virt/kvm/arm/hyp/aarch32.c 	cpsr = *vcpu_cpsr(vcpu);
vcpu               96 virt/kvm/arm/hyp/aarch32.c static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
vcpu               99 virt/kvm/arm/hyp/aarch32.c 	unsigned long cpsr = *vcpu_cpsr(vcpu);
vcpu              119 virt/kvm/arm/hyp/aarch32.c 	*vcpu_cpsr(vcpu) = cpsr;
vcpu              126 virt/kvm/arm/hyp/aarch32.c void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
vcpu              128 virt/kvm/arm/hyp/aarch32.c 	u32 pc = *vcpu_pc(vcpu);
vcpu              131 virt/kvm/arm/hyp/aarch32.c 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
vcpu              137 virt/kvm/arm/hyp/aarch32.c 	*vcpu_pc(vcpu) = pc;
vcpu              139 virt/kvm/arm/hyp/aarch32.c 	kvm_adjust_itstate(vcpu);
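kvm_skip_instr32() in the hyp/aarch32.c hits above advances the 32-bit guest PC past the trapped instruction, 2 bytes for a narrow Thumb encoding and 4 bytes otherwise, before fixing up the IT state. A minimal sketch of just the width computation, with the CPSR Thumb bit passed in explicitly instead of read from the vCPU:

#include <stdbool.h>
#include <stdint.h>

/* Return the new PC after skipping one AArch32 instruction: narrow Thumb
 * encodings are 2 bytes, wide Thumb and ARM encodings are 4 bytes. */
static uint32_t skip_instr32(uint32_t pc, bool thumb, bool is_wide_instr)
{
	return pc + ((thumb && !is_wide_instr) ? 2 : 4);
}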
vcpu               23 virt/kvm/arm/hyp/timer-sr.c void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
vcpu               37 virt/kvm/arm/hyp/timer-sr.c void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
vcpu              197 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
vcpu              199 virt/kvm/arm/hyp/vgic-v3-sr.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              200 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              233 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
vcpu              235 virt/kvm/arm/hyp/vgic-v3-sr.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              236 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              260 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
vcpu              262 virt/kvm/arm/hyp/vgic-v3-sr.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              309 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
vcpu              311 virt/kvm/arm/hyp/vgic-v3-sr.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              336 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
vcpu              342 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu = kern_hyp_va(vcpu);
vcpu              343 virt/kvm/arm/hyp/vgic-v3-sr.c 	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              373 virt/kvm/arm/hyp/vgic-v3-sr.c void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
vcpu              379 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu = kern_hyp_va(vcpu);
vcpu              380 virt/kvm/arm/hyp/vgic-v3-sr.c 	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              442 virt/kvm/arm/hyp/vgic-v3-sr.c static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
vcpu              444 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 esr = kvm_vcpu_get_hsr(vcpu);
vcpu              452 virt/kvm/arm/hyp/vgic-v3-sr.c static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
vcpu              456 virt/kvm/arm/hyp/vgic-v3-sr.c 	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              492 virt/kvm/arm/hyp/vgic-v3-sr.c static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
vcpu              495 virt/kvm/arm/hyp/vgic-v3-sr.c 	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              643 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              649 virt/kvm/arm/hyp/vgic-v3-sr.c 	grp = __vgic_v3_get_group(vcpu);
vcpu              651 virt/kvm/arm/hyp/vgic-v3-sr.c 	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
vcpu              672 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
vcpu              676 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
vcpu              701 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
vcpu              704 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 vid = vcpu_get_reg(vcpu, rt);
vcpu              716 virt/kvm/arm/hyp/vgic-v3-sr.c 	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
vcpu              725 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              727 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 vid = vcpu_get_reg(vcpu, rt);
vcpu              732 virt/kvm/arm/hyp/vgic-v3-sr.c 	grp = __vgic_v3_get_group(vcpu);
vcpu              745 virt/kvm/arm/hyp/vgic-v3-sr.c 	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
vcpu              762 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              764 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
vcpu              767 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              769 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
vcpu              772 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              774 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
vcpu              784 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              786 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
vcpu              796 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              798 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
vcpu              801 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              803 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
vcpu              806 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              808 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
vcpu              823 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
vcpu              825 virt/kvm/arm/hyp/vgic-v3-sr.c 	u64 val = vcpu_get_reg(vcpu, rt);
vcpu              843 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
vcpu              847 virt/kvm/arm/hyp/vgic-v3-sr.c 	if (!__vgic_v3_get_group(vcpu))
vcpu              852 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
vcpu              855 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
vcpu              857 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
vcpu              859 virt/kvm/arm/hyp/vgic-v3-sr.c 	if (!__vgic_v3_get_group(vcpu))
vcpu              865 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
vcpu              868 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 0);
vcpu              871 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
vcpu              874 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 1);
vcpu              877 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
vcpu              880 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 2);
vcpu              883 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
vcpu              886 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_read_apxrn(vcpu, rt, 3);
vcpu              889 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
vcpu              892 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 0);
vcpu              895 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
vcpu              898 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 1);
vcpu              901 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
vcpu              904 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 2);
vcpu              907 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
vcpu              910 virt/kvm/arm/hyp/vgic-v3-sr.c 	__vgic_v3_write_apxrn(vcpu, rt, 3);
vcpu              913 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
vcpu              919 virt/kvm/arm/hyp/vgic-v3-sr.c 	grp = __vgic_v3_get_group(vcpu);
vcpu              921 virt/kvm/arm/hyp/vgic-v3-sr.c 	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
vcpu              930 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
vcpu              933 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
vcpu              938 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, vmcr);
vcpu              941 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
vcpu              944 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
vcpu              954 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
vcpu              958 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
vcpu              961 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
vcpu              980 virt/kvm/arm/hyp/vgic-v3-sr.c 	vcpu_set_reg(vcpu, rt, val);
vcpu              983 virt/kvm/arm/hyp/vgic-v3-sr.c static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
vcpu              986 virt/kvm/arm/hyp/vgic-v3-sr.c 	u32 val = vcpu_get_reg(vcpu, rt);
vcpu             1001 virt/kvm/arm/hyp/vgic-v3-sr.c int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
vcpu             1010 virt/kvm/arm/hyp/vgic-v3-sr.c 	esr = kvm_vcpu_get_hsr(vcpu);
vcpu             1011 virt/kvm/arm/hyp/vgic-v3-sr.c 	if (vcpu_mode_is_32bit(vcpu)) {
vcpu             1012 virt/kvm/arm/hyp/vgic-v3-sr.c 		if (!kvm_condition_valid(vcpu)) {
vcpu             1013 virt/kvm/arm/hyp/vgic-v3-sr.c 			__kvm_skip_instr(vcpu);
vcpu             1122 virt/kvm/arm/hyp/vgic-v3-sr.c 	rt = kvm_vcpu_sys_get_rt(vcpu);
vcpu             1123 virt/kvm/arm/hyp/vgic-v3-sr.c 	fn(vcpu, vmcr, rt);
vcpu             1125 virt/kvm/arm/hyp/vgic-v3-sr.c 	__kvm_skip_instr(vcpu);
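
The __vgic_v3_perform_cpuif_access() entries above show the shape of the GICv3 CPU-interface trap path: pick an accessor for the trapped system register, read the transfer register index with kvm_vcpu_sys_get_rt(), call the accessor with the shadow VMCR, then skip the trapped instruction. Below is a minimal standalone sketch of that dispatch shape; the toy_vcpu type, the accessor signature and the ENG0 bit position are illustrative assumptions, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Simplified vCPU model: just general-purpose registers and a PC. */
struct toy_vcpu {
	uint64_t regs[31];
	uint64_t pc;
};

typedef void (*cpuif_accessor)(struct toy_vcpu *vcpu, uint32_t vmcr, int rt);

/* Example accessor, mirroring the read_igrpen0 pattern: report one VMCR bit. */
static void read_igrpen0(struct toy_vcpu *vcpu, uint32_t vmcr, int rt)
{
	vcpu->regs[rt] = !!(vmcr & (1u << 0));	/* assumed ENG0 bit position */
}

/* Dispatch: call the accessor for the trapped register, then skip the insn. */
static void emulate_cpuif_trap(struct toy_vcpu *vcpu, uint32_t vmcr,
			       cpuif_accessor fn, int rt)
{
	fn(vcpu, vmcr, rt);
	vcpu->pc += 4;		/* stand-in for __kvm_skip_instr() */
}

int main(void)
{
	struct toy_vcpu vcpu = { .pc = 0x1000 };

	emulate_cpuif_trap(&vcpu, 0x1, read_igrpen0, 3);
	printf("x3=%llu pc=%#llx\n",
	       (unsigned long long)vcpu.regs[3], (unsigned long long)vcpu.pc);
	return 0;
}
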
vcpu               83 virt/kvm/arm/mmio.c int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu               90 virt/kvm/arm/mmio.c 	if (unlikely(!vcpu->mmio_needed))
vcpu               93 virt/kvm/arm/mmio.c 	vcpu->mmio_needed = 0;
vcpu              102 virt/kvm/arm/mmio.c 		if (vcpu->arch.mmio_decode.sign_extend &&
vcpu              108 virt/kvm/arm/mmio.c 		if (!vcpu->arch.mmio_decode.sixty_four)
vcpu              113 virt/kvm/arm/mmio.c 		data = vcpu_data_host_to_guest(vcpu, data, len);
vcpu              114 virt/kvm/arm/mmio.c 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
vcpu              121 virt/kvm/arm/mmio.c 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu              126 virt/kvm/arm/mmio.c static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
vcpu              133 virt/kvm/arm/mmio.c 	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
vcpu              135 virt/kvm/arm/mmio.c 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
vcpu              139 virt/kvm/arm/mmio.c 	access_size = kvm_vcpu_dabt_get_as(vcpu);
vcpu              143 virt/kvm/arm/mmio.c 	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
vcpu              144 virt/kvm/arm/mmio.c 	sign_extend = kvm_vcpu_dabt_issext(vcpu);
vcpu              145 virt/kvm/arm/mmio.c 	sixty_four = kvm_vcpu_dabt_issf(vcpu);
vcpu              146 virt/kvm/arm/mmio.c 	rt = kvm_vcpu_dabt_get_rd(vcpu);
vcpu              149 virt/kvm/arm/mmio.c 	vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu              150 virt/kvm/arm/mmio.c 	vcpu->arch.mmio_decode.rt = rt;
vcpu              151 virt/kvm/arm/mmio.c 	vcpu->arch.mmio_decode.sixty_four = sixty_four;
vcpu              156 virt/kvm/arm/mmio.c int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
vcpu              171 virt/kvm/arm/mmio.c 	if (kvm_vcpu_dabt_isvalid(vcpu)) {
vcpu              172 virt/kvm/arm/mmio.c 		ret = decode_hsr(vcpu, &is_write, &len);
vcpu              180 virt/kvm/arm/mmio.c 	rt = vcpu->arch.mmio_decode.rt;
vcpu              183 virt/kvm/arm/mmio.c 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
vcpu              189 virt/kvm/arm/mmio.c 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
vcpu              195 virt/kvm/arm/mmio.c 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
vcpu              203 virt/kvm/arm/mmio.c 	vcpu->mmio_needed	= 1;
vcpu              209 virt/kvm/arm/mmio.c 		vcpu->stat.mmio_exit_kernel++;
vcpu              210 virt/kvm/arm/mmio.c 		kvm_handle_mmio_return(vcpu, run);
vcpu              216 virt/kvm/arm/mmio.c 	vcpu->stat.mmio_exit_user++;
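
kvm_handle_mmio_return() above fixes up the data returned for an MMIO read before writing it to the destination register: narrow reads are sign-extended when the decoded instruction asked for it, and the result is truncated to 32 bits unless the access was 64-bit. A small standalone model of that fix-up (the function name is illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Model of the data fix-up done before vcpu_set_reg() on an MMIO read return. */
static uint64_t mmio_read_fixup(uint64_t data, unsigned int len,
				bool sign_extend, bool sixty_four)
{
	if (sign_extend && len < 8) {
		/* classic xor/subtract sign-extension trick */
		uint64_t mask = 1ULL << (len * 8 - 1);
		data = (data ^ mask) - mask;
	}
	if (!sixty_four)
		data &= 0xffffffffULL;	/* 32-bit destination register */
	return data;
}

int main(void)
{
	/* a sign-extending byte load of 0xff becomes all ones */
	assert(mmio_read_fixup(0xff, 1, true, true) == ~0ULL);
	/* the same byte into a 32-bit register keeps only the low word */
	assert(mmio_read_fixup(0xff, 1, true, false) == 0xffffffffULL);
	return 0;
}
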
vcpu             1669 virt/kvm/arm/mmu.c static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vcpu             1678 virt/kvm/arm/mmu.c 	struct kvm *kvm = vcpu->kvm;
vcpu             1679 virt/kvm/arm/mmu.c 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
vcpu             1686 virt/kvm/arm/mmu.c 	write_fault = kvm_is_write_fault(vcpu);
vcpu             1687 virt/kvm/arm/mmu.c 	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
vcpu             1730 virt/kvm/arm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
vcpu             1861 virt/kvm/arm/mmu.c static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
vcpu             1871 virt/kvm/arm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
vcpu             1873 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
vcpu             1891 virt/kvm/arm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
vcpu             1908 virt/kvm/arm/mmu.c int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu             1918 virt/kvm/arm/mmu.c 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
vcpu             1920 virt/kvm/arm/mmu.c 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
vcpu             1921 virt/kvm/arm/mmu.c 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
vcpu             1924 virt/kvm/arm/mmu.c 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
vcpu             1929 virt/kvm/arm/mmu.c 		if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
vcpu             1933 virt/kvm/arm/mmu.c 			kvm_inject_vabt(vcpu);
vcpu             1938 virt/kvm/arm/mmu.c 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
vcpu             1939 virt/kvm/arm/mmu.c 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
vcpu             1945 virt/kvm/arm/mmu.c 			kvm_vcpu_trap_get_class(vcpu),
vcpu             1946 virt/kvm/arm/mmu.c 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
vcpu             1947 virt/kvm/arm/mmu.c 			(unsigned long)kvm_vcpu_get_hsr(vcpu));
vcpu             1951 virt/kvm/arm/mmu.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             1954 virt/kvm/arm/mmu.c 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
vcpu             1956 virt/kvm/arm/mmu.c 	write_fault = kvm_is_write_fault(vcpu);
vcpu             1974 virt/kvm/arm/mmu.c 		if (kvm_vcpu_dabt_is_cm(vcpu)) {
vcpu             1975 virt/kvm/arm/mmu.c 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
vcpu             1986 virt/kvm/arm/mmu.c 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
vcpu             1987 virt/kvm/arm/mmu.c 		ret = io_mem_abort(vcpu, run, fault_ipa);
vcpu             1992 virt/kvm/arm/mmu.c 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
vcpu             1995 virt/kvm/arm/mmu.c 		handle_access_fault(vcpu, fault_ipa);
vcpu             2000 virt/kvm/arm/mmu.c 	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
vcpu             2005 virt/kvm/arm/mmu.c 		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
vcpu             2009 virt/kvm/arm/mmu.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             2154 virt/kvm/arm/mmu.c void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
vcpu             2156 virt/kvm/arm/mmu.c 	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
vcpu             2429 virt/kvm/arm/mmu.c void kvm_set_way_flush(struct kvm_vcpu *vcpu)
vcpu             2431 virt/kvm/arm/mmu.c 	unsigned long hcr = *vcpu_hcr(vcpu);
vcpu             2443 virt/kvm/arm/mmu.c 		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
vcpu             2444 virt/kvm/arm/mmu.c 					vcpu_has_cache_enabled(vcpu));
vcpu             2445 virt/kvm/arm/mmu.c 		stage2_flush_vm(vcpu->kvm);
vcpu             2446 virt/kvm/arm/mmu.c 		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
vcpu             2450 virt/kvm/arm/mmu.c void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
vcpu             2452 virt/kvm/arm/mmu.c 	bool now_enabled = vcpu_has_cache_enabled(vcpu);
vcpu             2460 virt/kvm/arm/mmu.c 		stage2_flush_vm(vcpu->kvm);
vcpu             2464 virt/kvm/arm/mmu.c 		*vcpu_hcr(vcpu) &= ~HCR_TVM;
vcpu             2466 virt/kvm/arm/mmu.c 	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
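
kvm_set_way_flush() and kvm_toggle_cache() above implement the usual handling of guests that do set/way cache maintenance with the MMU off: flush the whole stage-2 address space once, then trap VM-register writes via HCR_TVM until the guest enables its caches, at which point the trap is dropped. A simplified standalone model of that bookkeeping; the HCR_TVM bit position and the exact flush condition are assumptions for illustration:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define HCR_TVM		(1ULL << 26)	/* assumed bit position of HCR_EL2.TVM */

struct toy_vcpu {
	uint64_t hcr;
	bool cache_enabled;
};

/* Guest issued a set/way operation: flush once, then start trapping VM regs. */
static void set_way_flush(struct toy_vcpu *vcpu)
{
	if (!(vcpu->hcr & HCR_TVM)) {
		/* stage2_flush_vm() would run here */
		vcpu->hcr |= HCR_TVM;
	}
}

/* Guest wrote SCTLR under the TVM trap and may have toggled its caches. */
static void toggle_cache(struct toy_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu->cache_enabled;

	if (now_enabled != was_enabled) {
		/* stage2_flush_vm() would run here (assumed condition) */
	}
	if (now_enabled)		/* caches are on again: stop trapping */
		vcpu->hcr &= ~HCR_TVM;
}

int main(void)
{
	struct toy_vcpu vcpu = { .hcr = 0, .cache_enabled = false };

	set_way_flush(&vcpu);		/* TVM goes up */
	vcpu.cache_enabled = true;
	toggle_cache(&vcpu, false);	/* TVM comes back down */
	printf("TVM still set: %d\n", !!(vcpu.hcr & HCR_TVM));
	return 0;
}
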
vcpu               21 virt/kvm/arm/perf.c 	struct kvm_vcpu *vcpu;
vcpu               23 virt/kvm/arm/perf.c 	vcpu = kvm_arm_get_running_vcpu();
vcpu               25 virt/kvm/arm/perf.c 	if (vcpu)
vcpu               26 virt/kvm/arm/perf.c 		return !vcpu_mode_priv(vcpu);
vcpu               33 virt/kvm/arm/perf.c 	struct kvm_vcpu *vcpu;
vcpu               35 virt/kvm/arm/perf.c 	vcpu = kvm_arm_get_running_vcpu();
vcpu               37 virt/kvm/arm/perf.c 	if (vcpu)
vcpu               38 virt/kvm/arm/perf.c 		return *vcpu_pc(vcpu);
vcpu               17 virt/kvm/arm/pmu.c static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
vcpu               26 virt/kvm/arm/pmu.c static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu               29 virt/kvm/arm/pmu.c 		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
vcpu               49 virt/kvm/arm/pmu.c 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
vcpu               51 virt/kvm/arm/pmu.c 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
vcpu               84 virt/kvm/arm/pmu.c static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu               94 virt/kvm/arm/pmu.c 	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
vcpu              104 virt/kvm/arm/pmu.c static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
vcpu              113 virt/kvm/arm/pmu.c 		counter = __vcpu_sys_reg(vcpu, reg);
vcpu              114 virt/kvm/arm/pmu.c 		counter_high = __vcpu_sys_reg(vcpu, reg + 1);
vcpu              120 virt/kvm/arm/pmu.c 		counter = __vcpu_sys_reg(vcpu, reg);
vcpu              139 virt/kvm/arm/pmu.c u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu              142 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              145 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
vcpu              162 virt/kvm/arm/pmu.c void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
vcpu              168 virt/kvm/arm/pmu.c 	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
vcpu              171 virt/kvm/arm/pmu.c 	kvm_pmu_create_perf_event(vcpu, select_idx);
vcpu              194 virt/kvm/arm/pmu.c static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
vcpu              202 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
vcpu              212 virt/kvm/arm/pmu.c 	__vcpu_sys_reg(vcpu, reg) = val;
vcpu              215 virt/kvm/arm/pmu.c 		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
vcpu              225 virt/kvm/arm/pmu.c void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              228 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              239 virt/kvm/arm/pmu.c void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
vcpu              242 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              245 virt/kvm/arm/pmu.c 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
vcpu              247 virt/kvm/arm/pmu.c 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
vcpu              255 virt/kvm/arm/pmu.c void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu              258 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              264 virt/kvm/arm/pmu.c u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
vcpu              266 virt/kvm/arm/pmu.c 	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
vcpu              282 virt/kvm/arm/pmu.c void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
vcpu              285 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              288 virt/kvm/arm/pmu.c 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
vcpu              303 virt/kvm/arm/pmu.c 			kvm_pmu_create_perf_event(vcpu, i);
vcpu              323 virt/kvm/arm/pmu.c void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
vcpu              326 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              344 virt/kvm/arm/pmu.c 			kvm_pmu_create_perf_event(vcpu, i);
vcpu              354 virt/kvm/arm/pmu.c static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
vcpu              358 virt/kvm/arm/pmu.c 	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
vcpu              359 virt/kvm/arm/pmu.c 		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
vcpu              360 virt/kvm/arm/pmu.c 		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
vcpu              361 virt/kvm/arm/pmu.c 		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
vcpu              362 virt/kvm/arm/pmu.c 		reg &= kvm_pmu_valid_counter_mask(vcpu);
vcpu              368 virt/kvm/arm/pmu.c static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
vcpu              370 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              373 virt/kvm/arm/pmu.c 	if (!kvm_arm_pmu_v3_ready(vcpu))
vcpu              376 virt/kvm/arm/pmu.c 	overflow = !!kvm_pmu_overflow_status(vcpu);
vcpu              382 virt/kvm/arm/pmu.c 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
vcpu              383 virt/kvm/arm/pmu.c 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
vcpu              389 virt/kvm/arm/pmu.c bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
vcpu              391 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              392 virt/kvm/arm/pmu.c 	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
vcpu              395 virt/kvm/arm/pmu.c 	if (likely(irqchip_in_kernel(vcpu->kvm)))
vcpu              404 virt/kvm/arm/pmu.c void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
vcpu              406 virt/kvm/arm/pmu.c 	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
vcpu              410 virt/kvm/arm/pmu.c 	if (vcpu->arch.pmu.irq_level)
vcpu              421 virt/kvm/arm/pmu.c void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
vcpu              423 virt/kvm/arm/pmu.c 	kvm_pmu_update_state(vcpu);
vcpu              433 virt/kvm/arm/pmu.c void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
vcpu              435 virt/kvm/arm/pmu.c 	kvm_pmu_update_state(vcpu);
vcpu              447 virt/kvm/arm/pmu.c 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
vcpu              459 virt/kvm/arm/pmu.c 	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
vcpu              466 virt/kvm/arm/pmu.c 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
vcpu              468 virt/kvm/arm/pmu.c 	if (kvm_pmu_overflow_status(vcpu)) {
vcpu              469 virt/kvm/arm/pmu.c 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              470 virt/kvm/arm/pmu.c 		kvm_vcpu_kick(vcpu);
vcpu              481 virt/kvm/arm/pmu.c void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
vcpu              483 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              486 virt/kvm/arm/pmu.c 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
vcpu              490 virt/kvm/arm/pmu.c 	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
vcpu              499 virt/kvm/arm/pmu.c 		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
vcpu              505 virt/kvm/arm/pmu.c 		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
vcpu              507 virt/kvm/arm/pmu.c 		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
vcpu              514 virt/kvm/arm/pmu.c 			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
vcpu              516 virt/kvm/arm/pmu.c 			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
vcpu              518 virt/kvm/arm/pmu.c 				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
vcpu              521 virt/kvm/arm/pmu.c 			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
vcpu              531 virt/kvm/arm/pmu.c void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
vcpu              536 virt/kvm/arm/pmu.c 	mask = kvm_pmu_valid_counter_mask(vcpu);
vcpu              538 virt/kvm/arm/pmu.c 		kvm_pmu_enable_counter_mask(vcpu,
vcpu              539 virt/kvm/arm/pmu.c 		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
vcpu              541 virt/kvm/arm/pmu.c 		kvm_pmu_disable_counter_mask(vcpu, mask);
vcpu              545 virt/kvm/arm/pmu.c 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
vcpu              549 virt/kvm/arm/pmu.c 			kvm_pmu_set_counter_value(vcpu, i, 0);
vcpu              553 virt/kvm/arm/pmu.c static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu              555 virt/kvm/arm/pmu.c 	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
vcpu              556 virt/kvm/arm/pmu.c 	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
vcpu              564 virt/kvm/arm/pmu.c static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu              566 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              581 virt/kvm/arm/pmu.c 	data = __vcpu_sys_reg(vcpu, reg);
vcpu              583 virt/kvm/arm/pmu.c 	kvm_pmu_stop_counter(vcpu, pmc);
vcpu              595 virt/kvm/arm/pmu.c 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
vcpu              603 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
vcpu              605 virt/kvm/arm/pmu.c 	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
vcpu              612 virt/kvm/arm/pmu.c 		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
vcpu              620 virt/kvm/arm/pmu.c 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
vcpu              646 virt/kvm/arm/pmu.c static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
vcpu              648 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
vcpu              651 virt/kvm/arm/pmu.c 	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
vcpu              657 virt/kvm/arm/pmu.c 			kvm_pmu_stop_counter(vcpu, pmc);
vcpu              659 virt/kvm/arm/pmu.c 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
vcpu              661 virt/kvm/arm/pmu.c 		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
vcpu              675 virt/kvm/arm/pmu.c void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
vcpu              683 virt/kvm/arm/pmu.c 	__vcpu_sys_reg(vcpu, reg) = event_type;
vcpu              685 virt/kvm/arm/pmu.c 	kvm_pmu_update_pmc_chained(vcpu, select_idx);
vcpu              686 virt/kvm/arm/pmu.c 	kvm_pmu_create_perf_event(vcpu, select_idx);
vcpu              699 virt/kvm/arm/pmu.c int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
vcpu              701 virt/kvm/arm/pmu.c 	if (!vcpu->arch.pmu.created)
vcpu              709 virt/kvm/arm/pmu.c 	if (irqchip_in_kernel(vcpu->kvm)) {
vcpu              710 virt/kvm/arm/pmu.c 		int irq = vcpu->arch.pmu.irq_num;
vcpu              711 virt/kvm/arm/pmu.c 		if (!kvm_arm_pmu_irq_initialized(vcpu))
vcpu              720 virt/kvm/arm/pmu.c 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
vcpu              722 virt/kvm/arm/pmu.c 	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
vcpu              726 virt/kvm/arm/pmu.c 	kvm_pmu_vcpu_reset(vcpu);
vcpu              727 virt/kvm/arm/pmu.c 	vcpu->arch.pmu.ready = true;
vcpu              732 virt/kvm/arm/pmu.c static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
vcpu              737 virt/kvm/arm/pmu.c 	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
vcpu              740 virt/kvm/arm/pmu.c 	if (vcpu->arch.pmu.created)
vcpu              743 virt/kvm/arm/pmu.c 	if (irqchip_in_kernel(vcpu->kvm)) {
vcpu              751 virt/kvm/arm/pmu.c 		if (!vgic_initialized(vcpu->kvm))
vcpu              754 virt/kvm/arm/pmu.c 		if (!kvm_arm_pmu_irq_initialized(vcpu))
vcpu              757 virt/kvm/arm/pmu.c 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
vcpu              758 virt/kvm/arm/pmu.c 					 &vcpu->arch.pmu);
vcpu              763 virt/kvm/arm/pmu.c 	vcpu->arch.pmu.created = true;
vcpu              775 virt/kvm/arm/pmu.c 	struct kvm_vcpu *vcpu;
vcpu              777 virt/kvm/arm/pmu.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              778 virt/kvm/arm/pmu.c 		if (!kvm_arm_pmu_irq_initialized(vcpu))
vcpu              782 virt/kvm/arm/pmu.c 			if (vcpu->arch.pmu.irq_num != irq)
vcpu              785 virt/kvm/arm/pmu.c 			if (vcpu->arch.pmu.irq_num == irq)
vcpu              793 virt/kvm/arm/pmu.c int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
vcpu              800 virt/kvm/arm/pmu.c 		if (!irqchip_in_kernel(vcpu->kvm))
vcpu              803 virt/kvm/arm/pmu.c 		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
vcpu              813 virt/kvm/arm/pmu.c 		if (!pmu_irq_is_valid(vcpu->kvm, irq))
vcpu              816 virt/kvm/arm/pmu.c 		if (kvm_arm_pmu_irq_initialized(vcpu))
vcpu              820 virt/kvm/arm/pmu.c 		vcpu->arch.pmu.irq_num = irq;
vcpu              824 virt/kvm/arm/pmu.c 		return kvm_arm_pmu_v3_init(vcpu);
vcpu              830 virt/kvm/arm/pmu.c int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
vcpu              837 virt/kvm/arm/pmu.c 		if (!irqchip_in_kernel(vcpu->kvm))
vcpu              840 virt/kvm/arm/pmu.c 		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
vcpu              843 virt/kvm/arm/pmu.c 		if (!kvm_arm_pmu_irq_initialized(vcpu))
vcpu              846 virt/kvm/arm/pmu.c 		irq = vcpu->arch.pmu.irq_num;
vcpu              854 virt/kvm/arm/pmu.c int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
vcpu              860 virt/kvm/arm/pmu.c 		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
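
Two calculations recur throughout the pmu.c entries above: the valid-counter mask is derived from PMCR_EL0.N plus the fixed cycle counter, and a chained pair of 32-bit event counters is reassembled into a single 64-bit value from two adjacent PMEVCNTR registers. A standalone sketch of both, with the field positions spelled out as assumptions (the ARMV8_PMU_* definitions in the kernel headers are authoritative):

#include <stdint.h>
#include <stdio.h>

/* Assumed ARMv8 PMU field positions. */
#define PMCR_N_SHIFT	11
#define PMCR_N_MASK	0x1f
#define CYCLE_IDX	31

/* Mask of implemented event counters, always including the cycle counter. */
static uint64_t valid_counter_mask(uint64_t pmcr)
{
	uint64_t n = (pmcr >> PMCR_N_SHIFT) & PMCR_N_MASK;

	return (n ? ((1ULL << n) - 1) : 0) | (1ULL << CYCLE_IDX);
}

/* Reassemble a chained pair: even counter is the low half, odd the high half. */
static uint64_t chained_counter_value(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

int main(void)
{
	uint64_t pmcr = 6ULL << PMCR_N_SHIFT;	/* 6 event counters implemented */

	printf("mask=%#llx\n", (unsigned long long)valid_counter_mask(pmcr));
	printf("pair=%#llx\n",
	       (unsigned long long)chained_counter_value(0xffffffff, 0x1));
	return 0;
}
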
vcpu               26 virt/kvm/arm/psci.c static u32 smccc_get_function(struct kvm_vcpu *vcpu)
vcpu               28 virt/kvm/arm/psci.c 	return vcpu_get_reg(vcpu, 0);
vcpu               31 virt/kvm/arm/psci.c static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
vcpu               33 virt/kvm/arm/psci.c 	return vcpu_get_reg(vcpu, 1);
vcpu               36 virt/kvm/arm/psci.c static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
vcpu               38 virt/kvm/arm/psci.c 	return vcpu_get_reg(vcpu, 2);
vcpu               41 virt/kvm/arm/psci.c static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
vcpu               43 virt/kvm/arm/psci.c 	return vcpu_get_reg(vcpu, 3);
vcpu               46 virt/kvm/arm/psci.c static void smccc_set_retval(struct kvm_vcpu *vcpu,
vcpu               52 virt/kvm/arm/psci.c 	vcpu_set_reg(vcpu, 0, a0);
vcpu               53 virt/kvm/arm/psci.c 	vcpu_set_reg(vcpu, 1, a1);
vcpu               54 virt/kvm/arm/psci.c 	vcpu_set_reg(vcpu, 2, a2);
vcpu               55 virt/kvm/arm/psci.c 	vcpu_set_reg(vcpu, 3, a3);
vcpu               66 virt/kvm/arm/psci.c static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
vcpu               81 virt/kvm/arm/psci.c 	kvm_vcpu_block(vcpu);
vcpu               82 virt/kvm/arm/psci.c 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu               87 virt/kvm/arm/psci.c static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
vcpu               89 virt/kvm/arm/psci.c 	vcpu->arch.power_off = true;
vcpu               90 virt/kvm/arm/psci.c 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
vcpu               91 virt/kvm/arm/psci.c 	kvm_vcpu_kick(vcpu);
vcpu               98 virt/kvm/arm/psci.c 	struct kvm_vcpu *vcpu = NULL;
vcpu              105 virt/kvm/arm/psci.c 	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
vcpu              111 virt/kvm/arm/psci.c 	if (!vcpu)
vcpu              113 virt/kvm/arm/psci.c 	if (!vcpu->arch.power_off) {
vcpu              120 virt/kvm/arm/psci.c 	reset_state = &vcpu->arch.reset_state;
vcpu              134 virt/kvm/arm/psci.c 	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
vcpu              142 virt/kvm/arm/psci.c 	vcpu->arch.power_off = false;
vcpu              143 virt/kvm/arm/psci.c 	kvm_vcpu_wake_up(vcpu);
vcpu              148 virt/kvm/arm/psci.c static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
vcpu              155 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
vcpu              158 virt/kvm/arm/psci.c 	target_affinity = smccc_get_arg1(vcpu);
vcpu              159 virt/kvm/arm/psci.c 	lowest_affinity_level = smccc_get_arg2(vcpu);
vcpu              188 virt/kvm/arm/psci.c static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
vcpu              202 virt/kvm/arm/psci.c 	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
vcpu              204 virt/kvm/arm/psci.c 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
vcpu              206 virt/kvm/arm/psci.c 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu              207 virt/kvm/arm/psci.c 	vcpu->run->system_event.type = type;
vcpu              208 virt/kvm/arm/psci.c 	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu              211 virt/kvm/arm/psci.c static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
vcpu              213 virt/kvm/arm/psci.c 	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
vcpu              216 virt/kvm/arm/psci.c static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
vcpu              218 virt/kvm/arm/psci.c 	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
vcpu              221 virt/kvm/arm/psci.c static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
vcpu              223 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
vcpu              224 virt/kvm/arm/psci.c 	u32 psci_fn = smccc_get_function(vcpu);
vcpu              238 virt/kvm/arm/psci.c 		val = kvm_psci_vcpu_suspend(vcpu);
vcpu              241 virt/kvm/arm/psci.c 		kvm_psci_vcpu_off(vcpu);
vcpu              247 virt/kvm/arm/psci.c 		val = kvm_psci_vcpu_on(vcpu);
vcpu              252 virt/kvm/arm/psci.c 		val = kvm_psci_vcpu_affinity_info(vcpu);
vcpu              263 virt/kvm/arm/psci.c 		kvm_psci_system_off(vcpu);
vcpu              278 virt/kvm/arm/psci.c 		kvm_psci_system_reset(vcpu);
vcpu              291 virt/kvm/arm/psci.c 	smccc_set_retval(vcpu, val, 0, 0, 0);
vcpu              295 virt/kvm/arm/psci.c static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
vcpu              297 virt/kvm/arm/psci.c 	u32 psci_fn = smccc_get_function(vcpu);
vcpu              307 virt/kvm/arm/psci.c 		feature = smccc_get_arg1(vcpu);
vcpu              330 virt/kvm/arm/psci.c 		return kvm_psci_0_2_call(vcpu);
vcpu              333 virt/kvm/arm/psci.c 	smccc_set_retval(vcpu, val, 0, 0, 0);
vcpu              337 virt/kvm/arm/psci.c static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
vcpu              339 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
vcpu              340 virt/kvm/arm/psci.c 	u32 psci_fn = smccc_get_function(vcpu);
vcpu              345 virt/kvm/arm/psci.c 		kvm_psci_vcpu_off(vcpu);
vcpu              350 virt/kvm/arm/psci.c 		val = kvm_psci_vcpu_on(vcpu);
vcpu              358 virt/kvm/arm/psci.c 	smccc_set_retval(vcpu, val, 0, 0, 0);
vcpu              376 virt/kvm/arm/psci.c static int kvm_psci_call(struct kvm_vcpu *vcpu)
vcpu              378 virt/kvm/arm/psci.c 	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
vcpu              380 virt/kvm/arm/psci.c 		return kvm_psci_1_0_call(vcpu);
vcpu              382 virt/kvm/arm/psci.c 		return kvm_psci_0_2_call(vcpu);
vcpu              384 virt/kvm/arm/psci.c 		return kvm_psci_0_1_call(vcpu);
vcpu              390 virt/kvm/arm/psci.c int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
vcpu              392 virt/kvm/arm/psci.c 	u32 func_id = smccc_get_function(vcpu);
vcpu              401 virt/kvm/arm/psci.c 		feature = smccc_get_arg1(vcpu);
vcpu              432 virt/kvm/arm/psci.c 		return kvm_psci_call(vcpu);
vcpu              435 virt/kvm/arm/psci.c 	smccc_set_retval(vcpu, val, 0, 0, 0);
vcpu              439 virt/kvm/arm/psci.c int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
vcpu              444 virt/kvm/arm/psci.c int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
vcpu              496 virt/kvm/arm/psci.c int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              503 virt/kvm/arm/psci.c 		val = kvm_psci_version(vcpu, vcpu->kvm);
vcpu              512 virt/kvm/arm/psci.c 		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
vcpu              525 virt/kvm/arm/psci.c int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
vcpu              539 virt/kvm/arm/psci.c 		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
vcpu              545 virt/kvm/arm/psci.c 			vcpu->kvm->arch.psci_version = val;
vcpu              551 virt/kvm/arm/psci.c 			vcpu->kvm->arch.psci_version = val;
vcpu              591 virt/kvm/arm/psci.c 			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
vcpu              595 virt/kvm/arm/psci.c 			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
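
The smccc_get_function()/smccc_get_argN()/smccc_set_retval() entries encode the SMCCC calling convention that the PSCI and HVC handlers above rely on: the function ID arrives in register 0, up to three arguments in registers 1-3, and up to four results are written back to registers 0-3 before the guest resumes. A minimal standalone model of that register discipline (types and constants are illustrative):

#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t regs[4];	/* only r0..r3 matter for SMCCC here */
};

static uint32_t smccc_function(const struct toy_vcpu *vcpu)
{
	return (uint32_t)vcpu->regs[0];
}

static uint64_t smccc_arg(const struct toy_vcpu *vcpu, int n)
{
	return vcpu->regs[n];	/* n in 1..3 */
}

static void smccc_retval(struct toy_vcpu *vcpu,
			 uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3)
{
	vcpu->regs[0] = a0;
	vcpu->regs[1] = a1;
	vcpu->regs[2] = a2;
	vcpu->regs[3] = a3;
}

int main(void)
{
	/* assumed function ID: SMC32 fast-call encoding of PSCI_VERSION */
	struct toy_vcpu vcpu = { .regs = { 0x84000000 } };

	printf("fn=%#x arg1=%llu\n", smccc_function(&vcpu),
	       (unsigned long long)smccc_arg(&vcpu, 1));
	smccc_retval(&vcpu, 0x10000, 0, 0, 0);	/* version 1.0: major<<16 | minor */
	return 0;
}
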
vcpu              162 virt/kvm/arm/vgic/vgic-debug.c 			 struct kvm_vcpu *vcpu)
vcpu              167 virt/kvm/arm/vgic/vgic-debug.c 	if (vcpu) {
vcpu              169 virt/kvm/arm/vgic/vgic-debug.c 		id = vcpu->vcpu_id;
vcpu              178 virt/kvm/arm/vgic/vgic-debug.c 			    struct kvm_vcpu *vcpu)
vcpu              191 virt/kvm/arm/vgic/vgic-debug.c 		print_header(s, irq, vcpu);
vcpu              215 virt/kvm/arm/vgic/vgic-debug.c 			(irq->vcpu) ? irq->vcpu->vcpu_id : -1);
vcpu              223 virt/kvm/arm/vgic/vgic-debug.c 	struct kvm_vcpu *vcpu = NULL;
vcpu              235 virt/kvm/arm/vgic/vgic-debug.c 		vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
vcpu              237 virt/kvm/arm/vgic/vgic-debug.c 	irq = vgic_get_irq(kvm, vcpu, iter->intid);
vcpu              244 virt/kvm/arm/vgic/vgic-debug.c 	print_irq_state(s, irq, vcpu);
vcpu               74 virt/kvm/arm/vgic/vgic-init.c 	struct kvm_vcpu *vcpu;
vcpu               95 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu               96 virt/kvm/arm/vgic/vgic-init.c 		if (!mutex_trylock(&vcpu->mutex))
vcpu              101 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              102 virt/kvm/arm/vgic/vgic-init.c 		if (vcpu->arch.has_run_once)
vcpu              129 virt/kvm/arm/vgic/vgic-init.c 		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
vcpu              130 virt/kvm/arm/vgic/vgic-init.c 		mutex_unlock(&vcpu->mutex);
vcpu              166 virt/kvm/arm/vgic/vgic-init.c 		irq->vcpu = NULL;
vcpu              195 virt/kvm/arm/vgic/vgic-init.c int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
vcpu              197 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              198 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vcpu              217 virt/kvm/arm/vgic/vgic-init.c 		irq->vcpu = NULL;
vcpu              218 virt/kvm/arm/vgic/vgic-init.c 		irq->target_vcpu = vcpu;
vcpu              230 virt/kvm/arm/vgic/vgic-init.c 	if (!irqchip_in_kernel(vcpu->kvm))
vcpu              238 virt/kvm/arm/vgic/vgic-init.c 		mutex_lock(&vcpu->kvm->lock);
vcpu              239 virt/kvm/arm/vgic/vgic-init.c 		ret = vgic_register_redist_iodev(vcpu);
vcpu              240 virt/kvm/arm/vgic/vgic-init.c 		mutex_unlock(&vcpu->kvm->lock);
vcpu              245 virt/kvm/arm/vgic/vgic-init.c static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
vcpu              248 virt/kvm/arm/vgic/vgic-init.c 		vgic_v2_enable(vcpu);
vcpu              250 virt/kvm/arm/vgic/vgic-init.c 		vgic_v3_enable(vcpu);
vcpu              266 virt/kvm/arm/vgic/vgic-init.c 	struct kvm_vcpu *vcpu;
vcpu              285 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(idx, vcpu, kvm) {
vcpu              286 virt/kvm/arm/vgic/vgic-init.c 		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              293 virt/kvm/arm/vgic/vgic-init.c 				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
vcpu              313 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              314 virt/kvm/arm/vgic/vgic-init.c 		kvm_vgic_vcpu_enable(vcpu);
vcpu              356 virt/kvm/arm/vgic/vgic-init.c void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vcpu              358 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              366 virt/kvm/arm/vgic/vgic-init.c 	struct kvm_vcpu *vcpu;
vcpu              373 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              374 virt/kvm/arm/vgic/vgic-init.c 		kvm_vgic_vcpu_destroy(vcpu);
vcpu               40 virt/kvm/arm/vgic/vgic-its.c 				     struct kvm_vcpu *vcpu)
vcpu               62 virt/kvm/arm/vgic/vgic-its.c 	irq->target_vcpu = vcpu;
vcpu              312 virt/kvm/arm/vgic/vgic-its.c int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
vcpu              337 virt/kvm/arm/vgic/vgic-its.c 		if (vcpu && irq->target_vcpu != vcpu)
vcpu              347 virt/kvm/arm/vgic/vgic-its.c static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
vcpu              353 virt/kvm/arm/vgic/vgic-its.c 	irq->target_vcpu = vcpu;
vcpu              363 virt/kvm/arm/vgic/vgic-its.c 		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
vcpu              379 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu;
vcpu              384 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
vcpu              385 virt/kvm/arm/vgic/vgic-its.c 	update_affinity(ite->irq, vcpu);
vcpu              418 virt/kvm/arm/vgic/vgic-its.c static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
vcpu              420 virt/kvm/arm/vgic/vgic-its.c 	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
vcpu              429 virt/kvm/arm/vgic/vgic-its.c 	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
vcpu              444 virt/kvm/arm/vgic/vgic-its.c 			ret = kvm_read_guest_lock(vcpu->kvm,
vcpu              454 virt/kvm/arm/vgic/vgic-its.c 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
vcpu              457 virt/kvm/arm/vgic/vgic-its.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              458 virt/kvm/arm/vgic/vgic-its.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              662 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu;
vcpu              672 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
vcpu              673 virt/kvm/arm/vgic/vgic-its.c 	if (!vcpu)
vcpu              676 virt/kvm/arm/vgic/vgic-its.c 	if (!vcpu->arch.vgic_cpu.lpis_enabled)
vcpu              866 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu;
vcpu              882 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
vcpu              886 virt/kvm/arm/vgic/vgic-its.c 	return update_affinity(ite->irq, vcpu);
vcpu             1046 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             1087 virt/kvm/arm/vgic/vgic-its.c 		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
vcpu             1089 virt/kvm/arm/vgic/vgic-its.c 	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
vcpu             1299 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu;
vcpu             1308 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
vcpu             1310 virt/kvm/arm/vgic/vgic-its.c 	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
vcpu             1318 virt/kvm/arm/vgic/vgic-its.c 		update_lpi_config(kvm, irq, vcpu, false);
vcpu             1324 virt/kvm/arm/vgic/vgic-its.c 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
vcpu             1325 virt/kvm/arm/vgic/vgic-its.c 		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
vcpu             1689 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
vcpu             1790 virt/kvm/arm/vgic/vgic-its.c void vgic_enable_lpis(struct kvm_vcpu *vcpu)
vcpu             1792 virt/kvm/arm/vgic/vgic-its.c 	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
vcpu             1793 virt/kvm/arm/vgic/vgic-its.c 		its_sync_lpi_pending_table(vcpu);
vcpu             2141 virt/kvm/arm/vgic/vgic-its.c 	struct kvm_vcpu *vcpu = NULL;
vcpu             2175 virt/kvm/arm/vgic/vgic-its.c 		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
vcpu             2177 virt/kvm/arm/vgic/vgic-its.c 	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
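
its_sync_lpi_pending_table() above walks the guest's LPI pending table at GICR_PENDBASER, one bit per interrupt ID, and queues every LPI whose bit is set; the addressing is simply byte = intid / 8, bit = intid % 8. A tiny standalone model of that lookup (the helper name is illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* One bit per INTID in the (guest-provided) LPI pending table. */
static bool lpi_is_pending(const uint8_t *pending_table, uint32_t intid)
{
	return pending_table[intid / 8] & (1u << (intid % 8));
}

int main(void)
{
	uint8_t table[2048] = { 0 };
	uint32_t intid = 8195;			/* LPIs start at INTID 8192 */

	table[intid / 8] |= 1u << (intid % 8);
	assert(lpi_is_pending(table, intid));
	assert(!lpi_is_pending(table, 8192));
	return 0;
}
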
vcpu              292 virt/kvm/arm/vgic/vgic-kvm-device.c 	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
vcpu              350 virt/kvm/arm/vgic/vgic-kvm-device.c 	struct kvm_vcpu *vcpu;
vcpu              357 virt/kvm/arm/vgic/vgic-kvm-device.c 	vcpu = reg_attr.vcpu;
vcpu              373 virt/kvm/arm/vgic/vgic-kvm-device.c 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
vcpu              376 virt/kvm/arm/vgic/vgic-kvm-device.c 		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
vcpu              487 virt/kvm/arm/vgic/vgic-kvm-device.c 		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
vcpu              489 virt/kvm/arm/vgic/vgic-kvm-device.c 		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
vcpu              492 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (!reg_attr->vcpu)
vcpu              514 virt/kvm/arm/vgic/vgic-kvm-device.c 	struct kvm_vcpu *vcpu;
vcpu              522 virt/kvm/arm/vgic/vgic-kvm-device.c 	vcpu = reg_attr.vcpu;
vcpu              542 virt/kvm/arm/vgic/vgic-kvm-device.c 		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
vcpu              550 virt/kvm/arm/vgic/vgic-kvm-device.c 		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
vcpu              558 virt/kvm/arm/vgic/vgic-kvm-device.c 		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
vcpu              570 virt/kvm/arm/vgic/vgic-kvm-device.c 			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
vcpu               25 virt/kvm/arm/vgic/vgic-mmio-v2.c static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
vcpu               28 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
vcpu               38 virt/kvm/arm/vgic/vgic-mmio-v2.c 		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
vcpu               52 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
vcpu               56 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vcpu               63 virt/kvm/arm/vgic/vgic-mmio-v2.c 			vgic_kick_vcpus(vcpu->kvm);
vcpu               72 virt/kvm/arm/vgic/vgic-mmio-v2.c static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
vcpu               78 virt/kvm/arm/vgic/vgic-mmio-v2.c 		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
vcpu               90 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
vcpu               94 virt/kvm/arm/vgic/vgic-mmio-v2.c 	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
vcpu               98 virt/kvm/arm/vgic/vgic-mmio-v2.c static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
vcpu              102 virt/kvm/arm/vgic/vgic-mmio-v2.c 	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
vcpu              103 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_mmio_write_group(vcpu, addr, len, val);
vcpu              117 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct kvm_vcpu *vcpu;
vcpu              134 virt/kvm/arm/vgic/vgic-mmio-v2.c 	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
vcpu              140 virt/kvm/arm/vgic/vgic-mmio-v2.c 		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
vcpu              151 virt/kvm/arm/vgic/vgic-mmio-v2.c static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
vcpu              159 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              163 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              169 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
vcpu              174 virt/kvm/arm/vgic/vgic-mmio-v2.c 	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
vcpu              183 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
vcpu              190 virt/kvm/arm/vgic/vgic-mmio-v2.c 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
vcpu              193 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              197 virt/kvm/arm/vgic/vgic-mmio-v2.c static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
vcpu              205 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              209 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              214 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
vcpu              223 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              232 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              236 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
vcpu              245 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              253 virt/kvm/arm/vgic/vgic-mmio-v2.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              257 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              264 virt/kvm/arm/vgic/vgic-mmio-v2.c static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
vcpu              270 virt/kvm/arm/vgic/vgic-mmio-v2.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              311 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
vcpu              317 virt/kvm/arm/vgic/vgic-mmio-v2.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              348 virt/kvm/arm/vgic/vgic-mmio-v2.c 	vgic_set_vmcr(vcpu, &vmcr);
vcpu              351 virt/kvm/arm/vgic/vgic-mmio-v2.c static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
vcpu              362 virt/kvm/arm/vgic/vgic-mmio-v2.c 		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
vcpu              364 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              366 virt/kvm/arm/vgic/vgic-mmio-v2.c 		if (n > vgic_v3_max_apr_idx(vcpu))
vcpu              376 virt/kvm/arm/vgic/vgic-mmio-v2.c static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
vcpu              388 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
vcpu              390 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              392 virt/kvm/arm/vgic/vgic-mmio-v2.c 		if (n > vgic_v3_max_apr_idx(vcpu))
vcpu              487 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct kvm_vcpu *vcpu;
vcpu              495 virt/kvm/arm/vgic/vgic-mmio-v2.c 	vcpu = reg_attr.vcpu;
vcpu              517 virt/kvm/arm/vgic/vgic-mmio-v2.c 	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
vcpu              524 virt/kvm/arm/vgic/vgic-mmio-v2.c int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              533 virt/kvm/arm/vgic/vgic-mmio-v2.c 	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
vcpu              536 virt/kvm/arm/vgic/vgic-mmio-v2.c int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              545 virt/kvm/arm/vgic/vgic-mmio-v2.c 	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
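
vgic_mmio_write_target() above handles GICD_ITARGETSRn writes: each interrupt gets one byte, that byte is masked down to the online vCPUs, and the interrupt is routed to a single vCPU picked from the remaining bits (the lowest set bit, falling back to vCPU 0 when the mask is empty). A standalone sketch of that routing decision; the tie-break rule is stated here as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Decide which vCPU one GICD_ITARGETSR byte routes an interrupt to (sketch). */
static int itargetsr_to_vcpu(uint8_t targets_byte, unsigned int online_vcpus)
{
	uint8_t cpu_mask = (uint8_t)((1u << online_vcpus) - 1);
	uint8_t targets = targets_byte & cpu_mask;
	int target = 0;

	/* assumed convention: lowest set bit wins, default to vCPU 0 if empty */
	while (targets && !(targets & 1)) {
		targets >>= 1;
		target++;
	}
	return target;
}

int main(void)
{
	printf("0x0c with 4 vCPUs -> vCPU %d\n", itargetsr_to_vcpu(0x0c, 4));
	printf("0xf0 with 2 vCPUs -> vCPU %d\n", itargetsr_to_vcpu(0xf0, 2));
	return 0;
}
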
vcpu               61 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
vcpu               64 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
vcpu               76 virt/kvm/arm/vgic/vgic-mmio-v3.c 		if (vgic_has_its(vcpu->kvm)) {
vcpu               95 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
vcpu               99 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vcpu              107 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_kick_vcpus(vcpu->kvm);
vcpu              115 virt/kvm/arm/vgic/vgic-mmio-v3.c static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
vcpu              121 virt/kvm/arm/vgic/vgic-mmio-v3.c 		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
vcpu              125 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
vcpu              129 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
vcpu              133 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
vcpu              143 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vgic_put_irq(vcpu->kvm, irq);
vcpu              147 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
vcpu              159 virt/kvm/arm/vgic/vgic-mmio-v3.c 	irq = vgic_get_irq(vcpu->kvm, NULL, intid);
vcpu              168 virt/kvm/arm/vgic/vgic-mmio-v3.c 	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
vcpu              171 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vgic_put_irq(vcpu->kvm, irq);
vcpu              174 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
vcpu              177 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              183 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
vcpu              187 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              190 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (!vgic_has_its(vcpu->kvm))
vcpu              196 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_flush_pending_lpis(vcpu);
vcpu              197 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_its_invalidate_cache(vcpu->kvm);
vcpu              201 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_enable_lpis(vcpu);
vcpu              204 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
vcpu              207 virt/kvm/arm/vgic/vgic-mmio-v3.c 	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
vcpu              208 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              210 virt/kvm/arm/vgic/vgic-mmio-v3.c 	int target_vcpu_id = vcpu->vcpu_id;
vcpu              220 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (vgic_has_its(vcpu->kvm))
vcpu              226 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
vcpu              232 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
vcpu              244 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
vcpu              259 virt/kvm/arm/vgic/vgic-mmio-v3.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              264 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              270 virt/kvm/arm/vgic/vgic-mmio-v3.c static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
vcpu              279 virt/kvm/arm/vgic/vgic-mmio-v3.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              289 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              295 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              384 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
vcpu              387 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vcpu              392 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
vcpu              396 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
vcpu              397 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              413 virt/kvm/arm/vgic/vgic-mmio-v3.c static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
vcpu              416 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              421 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
vcpu              425 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              605 virt/kvm/arm/vgic/vgic-mmio-v3.c int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
vcpu              607 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm *kvm = vcpu->kvm;
vcpu              609 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              610 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
vcpu              640 virt/kvm/arm/vgic/vgic-mmio-v3.c 	rd_dev->redist_vcpu = vcpu;
vcpu              654 virt/kvm/arm/vgic/vgic-mmio-v3.c static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
vcpu              656 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
vcpu              658 virt/kvm/arm/vgic/vgic-mmio-v3.c 	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
vcpu              663 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm_vcpu *vcpu;
vcpu              666 virt/kvm/arm/vgic/vgic-mmio-v3.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
vcpu              667 virt/kvm/arm/vgic/vgic-mmio-v3.c 		ret = vgic_register_redist_iodev(vcpu);
vcpu              676 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vcpu = kvm_get_vcpu(kvm, c);
vcpu              677 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_unregister_redist_iodev(vcpu);
vcpu              790 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm_vcpu *vcpu;
vcpu              798 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vcpu = reg_attr.vcpu;
vcpu              817 virt/kvm/arm/vgic/vgic-mmio-v3.c 		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
vcpu              827 virt/kvm/arm/vgic/vgic-mmio-v3.c 	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
vcpu              839 virt/kvm/arm/vgic/vgic-mmio-v3.c static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
vcpu              848 virt/kvm/arm/vgic/vgic-mmio-v3.c 	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
vcpu              886 virt/kvm/arm/vgic/vgic-mmio-v3.c void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
vcpu              888 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm *kvm = vcpu->kvm;
vcpu              893 virt/kvm/arm/vgic/vgic-mmio-v3.c 	int vcpu_id = vcpu->vcpu_id;
vcpu              932 virt/kvm/arm/vgic/vgic-mmio-v3.c 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
vcpu              943 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              948 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              952 virt/kvm/arm/vgic/vgic-mmio-v3.c int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              960 virt/kvm/arm/vgic/vgic-mmio-v3.c 	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
vcpu              963 virt/kvm/arm/vgic/vgic-mmio-v3.c int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              971 virt/kvm/arm/vgic/vgic-mmio-v3.c 	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
vcpu              974 virt/kvm/arm/vgic/vgic-mmio-v3.c int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              981 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_write_irq_line_level_info(vcpu, intid, *val);
vcpu              983 virt/kvm/arm/vgic/vgic-mmio-v3.c 		*val = vgic_read_irq_line_level_info(vcpu, intid);
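
match_mpidr() above decides whether a vCPU is targeted by a GICv3 SGI: the SGI register carries the upper affinity levels of the target cluster plus a 16-bit mask of Aff0 values, so a vCPU matches when its upper MPIDR affinity fields equal the SGI's and its Aff0 bit is set in the mask. A standalone model of that test; the MPIDR field packing used here is an architectural assumption written out for illustration:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* MPIDR affinity packing assumed as Aff3[39:32] Aff2[23:16] Aff1[15:8] Aff0[7:0]. */
#define AFF0_MASK	0xffULL

/* Does a vCPU with this MPIDR belong to the SGI target set? */
static bool sgi_targets_vcpu(uint64_t mpidr, uint64_t sgi_aff, uint16_t aff0_mask)
{
	unsigned int level0 = mpidr & AFF0_MASK;

	if ((mpidr & ~AFF0_MASK) != sgi_aff)	/* upper levels must match exactly */
		return false;
	return level0 < 16 && (aff0_mask & (1u << level0));
}

int main(void)
{
	/* cluster Aff1=1, cores 0 and 2 targeted */
	uint64_t sgi_aff = 1ULL << 8;
	uint16_t mask = (1u << 0) | (1u << 2);

	assert(sgi_targets_vcpu((1ULL << 8) | 2, sgi_aff, mask));
	assert(!sgi_targets_vcpu((1ULL << 8) | 1, sgi_aff, mask));
	assert(!sgi_targets_vcpu(2, sgi_aff, mask));	/* wrong cluster */
	return 0;
}
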
vcpu               17 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
vcpu               23 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
vcpu               29 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               35 virt/kvm/arm/vgic/vgic-mmio.c int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               42 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
vcpu               51 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu               56 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu               62 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               70 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu               74 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu               76 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu               84 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
vcpu               93 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu               98 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              104 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
vcpu              113 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              133 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              135 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              139 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
vcpu              148 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              155 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              159 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
vcpu              168 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              176 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              195 virt/kvm/arm/vgic/vgic-mmio.c 	struct kvm_vcpu *vcpu;
vcpu              198 virt/kvm/arm/vgic/vgic-mmio.c 	vcpu = kvm_arm_get_running_vcpu();
vcpu              200 virt/kvm/arm/vgic/vgic-mmio.c 	return vcpu;
vcpu              204 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
vcpu              214 virt/kvm/arm/vgic/vgic-mmio.c static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
vcpu              217 virt/kvm/arm/vgic/vgic-mmio.c 		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
vcpu              220 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
vcpu              230 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              233 virt/kvm/arm/vgic/vgic-mmio.c 		if (is_vgic_v2_sgi(vcpu, irq)) {
vcpu              234 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_put_irq(vcpu->kvm, irq);
vcpu              240 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
vcpu              243 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              244 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              249 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
vcpu              273 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
vcpu              283 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              286 virt/kvm/arm/vgic/vgic-mmio.c 		if (is_vgic_v2_sgi(vcpu, irq)) {
vcpu              287 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_put_irq(vcpu->kvm, irq);
vcpu              294 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
vcpu              299 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              319 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
vcpu              321 virt/kvm/arm/vgic/vgic-mmio.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
vcpu              323 virt/kvm/arm/vgic/vgic-mmio.c 		kvm_arm_halt_guest(vcpu->kvm);
vcpu              327 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
vcpu              329 virt/kvm/arm/vgic/vgic-mmio.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
vcpu              331 virt/kvm/arm/vgic/vgic-mmio.c 		kvm_arm_resume_guest(vcpu->kvm);
vcpu              334 virt/kvm/arm/vgic/vgic-mmio.c static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
vcpu              343 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              352 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              358 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
vcpu              364 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
vcpu              365 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_prepare(vcpu, intid);
vcpu              367 virt/kvm/arm/vgic/vgic-mmio.c 	val = __vgic_mmio_read_active(vcpu, addr, len);
vcpu              369 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_finish(vcpu, intid);
vcpu              370 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
vcpu              375 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
vcpu              378 virt/kvm/arm/vgic/vgic-mmio.c 	return __vgic_mmio_read_active(vcpu, addr, len);
vcpu              382 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
vcpu              392 virt/kvm/arm/vgic/vgic-mmio.c static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
vcpu              401 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
vcpu              403 virt/kvm/arm/vgic/vgic-mmio.c 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
vcpu              427 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              432 virt/kvm/arm/vgic/vgic-mmio.c static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
vcpu              440 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              441 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_mmio_change_active(vcpu, irq, false);
vcpu              442 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              446 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
vcpu              452 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
vcpu              453 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_prepare(vcpu, intid);
vcpu              455 virt/kvm/arm/vgic/vgic-mmio.c 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
vcpu              457 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_finish(vcpu, intid);
vcpu              458 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
vcpu              461 virt/kvm/arm/vgic/vgic-mmio.c int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
vcpu              465 virt/kvm/arm/vgic/vgic-mmio.c 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
vcpu              469 virt/kvm/arm/vgic/vgic-mmio.c static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
vcpu              477 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              478 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_mmio_change_active(vcpu, irq, true);
vcpu              479 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              483 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
vcpu              489 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
vcpu              490 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_prepare(vcpu, intid);
vcpu              492 virt/kvm/arm/vgic/vgic-mmio.c 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
vcpu              494 virt/kvm/arm/vgic/vgic-mmio.c 	vgic_access_active_finish(vcpu, intid);
vcpu              495 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
vcpu              498 virt/kvm/arm/vgic/vgic-mmio.c int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
vcpu              502 virt/kvm/arm/vgic/vgic-mmio.c 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
vcpu              506 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
vcpu              514 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              518 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              531 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
vcpu              540 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              547 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              551 virt/kvm/arm/vgic/vgic-mmio.c unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
vcpu              559 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              564 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              570 virt/kvm/arm/vgic/vgic-mmio.c void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
vcpu              590 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              599 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              603 virt/kvm/arm/vgic/vgic-mmio.c u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
vcpu              607 virt/kvm/arm/vgic/vgic-mmio.c 	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
vcpu              615 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              619 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              625 virt/kvm/arm/vgic/vgic-mmio.c void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
vcpu              629 virt/kvm/arm/vgic/vgic-mmio.c 	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
vcpu              639 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vcpu              650 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              654 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              680 virt/kvm/arm/vgic/vgic-mmio.c void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
vcpu              683 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_v2_set_vmcr(vcpu, vmcr);
vcpu              685 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_v3_set_vmcr(vcpu, vmcr);
vcpu              688 virt/kvm/arm/vgic/vgic-mmio.c void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
vcpu              691 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_v2_get_vmcr(vcpu, vmcr);
vcpu              693 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_v3_get_vmcr(vcpu, vmcr);
vcpu              787 virt/kvm/arm/vgic/vgic-mmio.c vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
vcpu              794 virt/kvm/arm/vgic/vgic-mmio.c 	if (!region || !check_region(vcpu->kvm, region, addr, len))
vcpu              800 virt/kvm/arm/vgic/vgic-mmio.c static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              807 virt/kvm/arm/vgic/vgic-mmio.c 	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
vcpu              813 virt/kvm/arm/vgic/vgic-mmio.c 	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
vcpu              822 virt/kvm/arm/vgic/vgic-mmio.c static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              829 virt/kvm/arm/vgic/vgic-mmio.c 	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
vcpu              833 virt/kvm/arm/vgic/vgic-mmio.c 	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
vcpu              844 virt/kvm/arm/vgic/vgic-mmio.c int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
vcpu              848 virt/kvm/arm/vgic/vgic-mmio.c 		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
vcpu              850 virt/kvm/arm/vgic/vgic-mmio.c 		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
vcpu              853 virt/kvm/arm/vgic/vgic-mmio.c static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              860 virt/kvm/arm/vgic/vgic-mmio.c 	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
vcpu              868 virt/kvm/arm/vgic/vgic-mmio.c 		data = region->read(vcpu, addr, len);
vcpu              871 virt/kvm/arm/vgic/vgic-mmio.c 		data = region->read(vcpu, addr, len);
vcpu              877 virt/kvm/arm/vgic/vgic-mmio.c 		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
vcpu              885 virt/kvm/arm/vgic/vgic-mmio.c static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
vcpu              892 virt/kvm/arm/vgic/vgic-mmio.c 	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
vcpu              898 virt/kvm/arm/vgic/vgic-mmio.c 		region->write(vcpu, addr, len, data);
vcpu              901 virt/kvm/arm/vgic/vgic-mmio.c 		region->write(vcpu, addr, len, data);
vcpu              907 virt/kvm/arm/vgic/vgic-mmio.c 		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
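
Editor's note: the vgic-mmio.c entries above all follow one access pattern: a handler walks every interrupt covered by a register window, takes a reference with vgic_get_irq(), updates the per-IRQ state (under irq_lock in the kernel), and drops the reference with vgic_put_irq(), queueing the interrupt via vgic_queue_irq_unlock() when the write made it pending. The sketch below is a minimal, self-contained model of that loop; the struct and the two helpers keep the kernel's names but have simplified, stand-in definitions and signatures, not the real ones.

	/* Simplified model of the per-register-access loop used by the vgic
	 * MMIO handlers listed above. All types and helpers are illustrative
	 * stand-ins, not the kernel's definitions. */
	#include <stdbool.h>
	#include <stdio.h>

	struct vgic_irq { bool enabled; int refcount; };

	static struct vgic_irq irqs[64];

	static struct vgic_irq *vgic_get_irq(unsigned int intid)
	{
		irqs[intid].refcount++;		/* take a reference for the handler */
		return &irqs[intid];
	}

	static void vgic_put_irq(struct vgic_irq *irq)
	{
		irq->refcount--;		/* drop it before the next bit */
	}

	/* One bit per interrupt: a GICD_ISENABLER-style "set-enable" write. */
	static void write_senable(unsigned int intid_base, unsigned long val)
	{
		for (unsigned int i = 0; i < 32; i++) {
			if (!(val & (1UL << i)))
				continue;

			struct vgic_irq *irq = vgic_get_irq(intid_base + i);
			irq->enabled = true;	/* done under irq->irq_lock in the kernel */
			vgic_put_irq(irq);
		}
	}

	int main(void)
	{
		write_senable(32, 0x5);		/* enable INTIDs 32 and 34 */
		printf("irq32 enabled=%d irq34 enabled=%d\n",
		       irqs[32].enabled, irqs[34].enabled);
		return 0;
	}
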
vcpu               14 virt/kvm/arm/vgic/vgic-mmio.h 		unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               20 virt/kvm/arm/vgic/vgic-mmio.h 		void (*write)(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               26 virt/kvm/arm/vgic/vgic-mmio.h 	unsigned long (*uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu               29 virt/kvm/arm/vgic/vgic-mmio.h 		int (*uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              101 virt/kvm/arm/vgic/vgic-mmio.h int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
vcpu              117 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
vcpu              120 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
vcpu              123 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              126 virt/kvm/arm/vgic/vgic-mmio.h int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              129 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              132 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
vcpu              135 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
vcpu              138 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
vcpu              142 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
vcpu              146 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
vcpu              149 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
vcpu              153 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
vcpu              157 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
vcpu              160 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
vcpu              163 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
vcpu              167 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
vcpu              171 virt/kvm/arm/vgic/vgic-mmio.h int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
vcpu              175 virt/kvm/arm/vgic/vgic-mmio.h int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
vcpu              179 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
vcpu              182 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
vcpu              186 virt/kvm/arm/vgic/vgic-mmio.h unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
vcpu              189 virt/kvm/arm/vgic/vgic-mmio.h void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
vcpu              193 virt/kvm/arm/vgic/vgic-mmio.h int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
vcpu              196 virt/kvm/arm/vgic/vgic-mmio.h u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid);
vcpu              198 virt/kvm/arm/vgic/vgic-mmio.h void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
vcpu               29 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
vcpu               31 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu               49 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
vcpu               51 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu               70 virt/kvm/arm/vgic/vgic-v2.c 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
vcpu               71 virt/kvm/arm/vgic/vgic-v2.c 			kvm_notify_acked_irq(vcpu->kvm, 0,
vcpu               74 virt/kvm/arm/vgic/vgic-v2.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
vcpu              120 virt/kvm/arm/vgic/vgic-v2.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              137 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
vcpu              212 virt/kvm/arm/vgic/vgic-v2.c 	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
vcpu              215 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
vcpu              217 virt/kvm/arm/vgic/vgic-v2.c 	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
vcpu              220 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vcpu              222 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              247 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vcpu              249 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              275 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_enable(struct kvm_vcpu *vcpu)
vcpu              282 virt/kvm/arm/vgic/vgic-v2.c 	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
vcpu              285 virt/kvm/arm/vgic/vgic-v2.c 	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
vcpu              376 virt/kvm/arm/vgic/vgic-v2.c 	if (!PAGE_ALIGNED(info->vcpu.start) ||
vcpu              377 virt/kvm/arm/vgic/vgic-v2.c 	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
vcpu              380 virt/kvm/arm/vgic/vgic-v2.c 		ret = create_hyp_io_mappings(info->vcpu.start,
vcpu              381 virt/kvm/arm/vgic/vgic-v2.c 					     resource_size(&info->vcpu),
vcpu              411 virt/kvm/arm/vgic/vgic-v2.c 	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
vcpu              427 virt/kvm/arm/vgic/vgic-v2.c static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
vcpu              429 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              430 virt/kvm/arm/vgic/vgic-v2.c 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              448 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_save_state(struct kvm_vcpu *vcpu)
vcpu              451 virt/kvm/arm/vgic/vgic-v2.c 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              457 virt/kvm/arm/vgic/vgic-v2.c 		save_lrs(vcpu, base);
vcpu              462 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
vcpu              464 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              466 virt/kvm/arm/vgic/vgic-v2.c 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
vcpu              481 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_load(struct kvm_vcpu *vcpu)
vcpu              483 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              491 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
vcpu              493 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              498 virt/kvm/arm/vgic/vgic-v2.c void vgic_v2_put(struct kvm_vcpu *vcpu)
vcpu              500 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
vcpu              502 virt/kvm/arm/vgic/vgic-v2.c 	vgic_v2_vmcr_sync(vcpu);
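
Editor's note: vgic_v2_save_state()/vgic_v2_restore_state() above shuttle up to used_lrs list registers between the GICH MMIO window and the per-vCPU shadow array vgic_cpu.vgic_v2.vgic_lr[]. Below is a rough stand-alone model of the save direction only; the struct, the array standing in for the GICH_LR registers, and the simplified save_lrs() signature are all assumptions for illustration.

	/* Model of the save_lrs() loop listed above: copy the in-use list
	 * registers out of a simulated GICH_LR window into the software
	 * shadow, bounded by used_lrs. */
	#include <stdint.h>
	#include <stdio.h>

	#define NR_LRS 4

	struct vgic_v2_cpu_if_model {
		uint32_t vgic_lr[NR_LRS];	/* software shadow of GICH_LRn */
	};

	static void save_lrs(struct vgic_v2_cpu_if_model *cpu_if,
			     const volatile uint32_t *gich_lr, unsigned int used_lrs)
	{
		for (unsigned int lr = 0; lr < used_lrs; lr++)
			cpu_if->vgic_lr[lr] = gich_lr[lr];
	}

	int main(void)
	{
		uint32_t fake_gich_lr[NR_LRS] = { 0x90000020, 0x10000021, 0, 0 };
		struct vgic_v2_cpu_if_model cpu_if = { { 0 } };

		save_lrs(&cpu_if, fake_gich_lr, 2);	/* only 2 LRs were in use */
		printf("LR0=%#x LR1=%#x\n",
		       (unsigned)cpu_if.vgic_lr[0], (unsigned)cpu_if.vgic_lr[1]);
		return 0;
	}
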
vcpu               18 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
vcpu               20 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu               31 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
vcpu               33 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu               35 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
vcpu               59 virt/kvm/arm/vgic/vgic-v3.c 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
vcpu               60 virt/kvm/arm/vgic/vgic-v3.c 			kvm_notify_acked_irq(vcpu->kvm, 0,
vcpu               63 virt/kvm/arm/vgic/vgic-v3.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
vcpu              111 virt/kvm/arm/vgic/vgic-v3.c 		vgic_put_irq(vcpu->kvm, irq);
vcpu              118 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
vcpu              120 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
vcpu              197 virt/kvm/arm/vgic/vgic-v3.c 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
vcpu              200 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
vcpu              202 virt/kvm/arm/vgic/vgic-v3.c 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
vcpu              205 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vcpu              207 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              208 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
vcpu              235 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
vcpu              237 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              238 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
vcpu              271 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_enable(struct kvm_vcpu *vcpu)
vcpu              273 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              288 virt/kvm/arm/vgic/vgic-v3.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vcpu              292 virt/kvm/arm/vgic/vgic-v3.c 		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
vcpu              297 virt/kvm/arm/vgic/vgic-v3.c 	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
vcpu              300 virt/kvm/arm/vgic/vgic-v3.c 	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
vcpu              316 virt/kvm/arm/vgic/vgic-v3.c 	struct kvm_vcpu *vcpu;
vcpu              325 virt/kvm/arm/vgic/vgic-v3.c 	vcpu = irq->target_vcpu;
vcpu              326 virt/kvm/arm/vgic/vgic-v3.c 	if (!vcpu)
vcpu              329 virt/kvm/arm/vgic/vgic-v3.c 	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
vcpu              342 virt/kvm/arm/vgic/vgic-v3.c 	if (irq->target_vcpu != vcpu) {
vcpu              347 virt/kvm/arm/vgic/vgic-v3.c 	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vcpu              373 virt/kvm/arm/vgic/vgic-v3.c 		struct kvm_vcpu *vcpu;
vcpu              377 virt/kvm/arm/vgic/vgic-v3.c 		vcpu = irq->target_vcpu;
vcpu              378 virt/kvm/arm/vgic/vgic-v3.c 		if (!vcpu)
vcpu              381 virt/kvm/arm/vgic/vgic-v3.c 		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
vcpu              499 virt/kvm/arm/vgic/vgic-v3.c 	struct kvm_vcpu *vcpu;
vcpu              506 virt/kvm/arm/vgic/vgic-v3.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
vcpu              507 virt/kvm/arm/vgic/vgic-v3.c 		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              602 virt/kvm/arm/vgic/vgic-v3.c 	if (!info->vcpu.start) {
vcpu              605 virt/kvm/arm/vgic/vgic-v3.c 	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
vcpu              607 virt/kvm/arm/vgic/vgic-v3.c 			(unsigned long long)info->vcpu.start);
vcpu              610 virt/kvm/arm/vgic/vgic-v3.c 		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
vcpu              617 virt/kvm/arm/vgic/vgic-v3.c 		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
vcpu              651 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_load(struct kvm_vcpu *vcpu)
vcpu              653 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              663 virt/kvm/arm/vgic/vgic-v3.c 	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
vcpu              666 virt/kvm/arm/vgic/vgic-v3.c 		__vgic_v3_activate_traps(vcpu);
vcpu              669 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
vcpu              671 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
vcpu              677 virt/kvm/arm/vgic/vgic-v3.c void vgic_v3_put(struct kvm_vcpu *vcpu)
vcpu              679 virt/kvm/arm/vgic/vgic-v3.c 	vgic_v3_vmcr_sync(vcpu);
vcpu              681 virt/kvm/arm/vgic/vgic-v3.c 	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
vcpu              684 virt/kvm/arm/vgic/vgic-v3.c 		__vgic_v3_deactivate_traps(vcpu);
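
Editor's note: the vgic-v3.c lines around GICR_PENDBASER_ADDRESS() show that an LPI's pending state lives in a guest-memory pending table: the byte at pendbase + (intid / 8) is read and bit (intid % 8) is tested or set. The sketch below models only that index arithmetic; the buffer stands in for guest memory that the kernel would access with kvm_read_guest_lock(), and the two function names are hypothetical.

	/* Model of the LPI pending-table lookup suggested by the vgic-v3.c
	 * lines above. The table buffer stands in for guest RAM. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PEND_TABLE_SIZE 8192	/* enough for 65536 INTIDs, 1 bit each */

	static uint8_t pending_table[PEND_TABLE_SIZE];

	static int lpi_is_pending(uint32_t intid)
	{
		uint32_t byte = intid / 8;
		uint32_t bit  = intid % 8;

		return (pending_table[byte] >> bit) & 1;
	}

	static void lpi_set_pending(uint32_t intid, int pending)
	{
		uint32_t byte = intid / 8;
		uint32_t bit  = intid % 8;

		if (pending)
			pending_table[byte] |= 1u << bit;
		else
			pending_table[byte] &= ~(1u << bit);
	}

	int main(void)
	{
		memset(pending_table, 0, sizeof(pending_table));
		lpi_set_pending(8192, 1);	/* the first valid LPI INTID is 8192 */
		printf("LPI 8192 pending: %d\n", lpi_is_pending(8192));
		return 0;
	}
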
vcpu               86 virt/kvm/arm/vgic/vgic-v4.c 	struct kvm_vcpu *vcpu = info;
vcpu               88 virt/kvm/arm/vgic/vgic-v4.c 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
vcpu               89 virt/kvm/arm/vgic/vgic-v4.c 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu               90 virt/kvm/arm/vgic/vgic-v4.c 	kvm_vcpu_kick(vcpu);
vcpu              107 virt/kvm/arm/vgic/vgic-v4.c 	struct kvm_vcpu *vcpu;
vcpu              125 virt/kvm/arm/vgic/vgic-v4.c 	kvm_for_each_vcpu(i, vcpu, kvm)
vcpu              126 virt/kvm/arm/vgic/vgic-v4.c 		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
vcpu              137 virt/kvm/arm/vgic/vgic-v4.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              149 virt/kvm/arm/vgic/vgic-v4.c 				  0, "vcpu", vcpu);
vcpu              182 virt/kvm/arm/vgic/vgic-v4.c 		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
vcpu              186 virt/kvm/arm/vgic/vgic-v4.c 		free_irq(irq, vcpu);
vcpu              195 virt/kvm/arm/vgic/vgic-v4.c int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
vcpu              197 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(vcpu->kvm))
vcpu              200 virt/kvm/arm/vgic/vgic-v4.c 	return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
vcpu              203 virt/kvm/arm/vgic/vgic-v4.c int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
vcpu              205 virt/kvm/arm/vgic/vgic-v4.c 	int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
vcpu              208 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(vcpu->kvm))
vcpu              221 virt/kvm/arm/vgic/vgic-v4.c 	err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
vcpu              339 virt/kvm/arm/vgic/vgic-v4.c void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
vcpu              341 virt/kvm/arm/vgic/vgic-v4.c 	if (vgic_supports_direct_msis(vcpu->kvm)) {
vcpu              342 virt/kvm/arm/vgic/vgic-v4.c 		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
vcpu              348 virt/kvm/arm/vgic/vgic-v4.c void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
vcpu              350 virt/kvm/arm/vgic/vgic-v4.c 	if (vgic_supports_direct_msis(vcpu->kvm)) {
vcpu              351 virt/kvm/arm/vgic/vgic-v4.c 		int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
vcpu               90 virt/kvm/arm/vgic/vgic.c struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
vcpu               96 virt/kvm/arm/vgic/vgic.c 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
vcpu              151 virt/kvm/arm/vgic/vgic.c void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
vcpu              153 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              163 virt/kvm/arm/vgic/vgic.c 			irq->vcpu = NULL;
vcpu              165 virt/kvm/arm/vgic/vgic.c 			vgic_put_irq(vcpu->kvm, irq);
vcpu              221 virt/kvm/arm/vgic/vgic.c 		return irq->vcpu ? : irq->target_vcpu;
vcpu              297 virt/kvm/arm/vgic/vgic.c static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
vcpu              299 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              337 virt/kvm/arm/vgic/vgic.c 	struct kvm_vcpu *vcpu;
vcpu              342 virt/kvm/arm/vgic/vgic.c 	vcpu = vgic_target_oracle(irq);
vcpu              343 virt/kvm/arm/vgic/vgic.c 	if (irq->vcpu || !vcpu) {
vcpu              364 virt/kvm/arm/vgic/vgic.c 		if (vcpu) {
vcpu              365 virt/kvm/arm/vgic/vgic.c 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              366 virt/kvm/arm/vgic/vgic.c 			kvm_vcpu_kick(vcpu);
vcpu              379 virt/kvm/arm/vgic/vgic.c 	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
vcpu              394 virt/kvm/arm/vgic/vgic.c 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
vcpu              396 virt/kvm/arm/vgic/vgic.c 		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
vcpu              408 virt/kvm/arm/vgic/vgic.c 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
vcpu              409 virt/kvm/arm/vgic/vgic.c 	irq->vcpu = vcpu;
vcpu              412 virt/kvm/arm/vgic/vgic.c 	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
vcpu              414 virt/kvm/arm/vgic/vgic.c 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              415 virt/kvm/arm/vgic/vgic.c 	kvm_vcpu_kick(vcpu);
vcpu              440 virt/kvm/arm/vgic/vgic.c 	struct kvm_vcpu *vcpu;
vcpu              451 virt/kvm/arm/vgic/vgic.c 	vcpu = kvm_get_vcpu(kvm, cpuid);
vcpu              452 virt/kvm/arm/vgic/vgic.c 	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
vcpu              455 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(kvm, vcpu, intid);
vcpu              480 virt/kvm/arm/vgic/vgic.c static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
vcpu              514 virt/kvm/arm/vgic/vgic.c int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
vcpu              517 virt/kvm/arm/vgic/vgic.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
vcpu              524 virt/kvm/arm/vgic/vgic.c 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
vcpu              526 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
vcpu              540 virt/kvm/arm/vgic/vgic.c void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
vcpu              542 virt/kvm/arm/vgic/vgic.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
vcpu              554 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
vcpu              557 virt/kvm/arm/vgic/vgic.c int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
vcpu              562 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
vcpu              565 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
vcpu              571 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
vcpu              586 virt/kvm/arm/vgic/vgic.c int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
vcpu              592 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
vcpu              596 virt/kvm/arm/vgic/vgic.c 	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
vcpu              599 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
vcpu              618 virt/kvm/arm/vgic/vgic.c static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
vcpu              620 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              634 virt/kvm/arm/vgic/vgic.c 		BUG_ON(vcpu != irq->vcpu);
vcpu              644 virt/kvm/arm/vgic/vgic.c 			irq->vcpu = NULL;
vcpu              654 virt/kvm/arm/vgic/vgic.c 			vgic_put_irq(vcpu->kvm, irq);
vcpu              658 virt/kvm/arm/vgic/vgic.c 		if (target_vcpu == vcpu) {
vcpu              673 virt/kvm/arm/vgic/vgic.c 		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
vcpu              674 virt/kvm/arm/vgic/vgic.c 			vcpuA = vcpu;
vcpu              678 virt/kvm/arm/vgic/vgic.c 			vcpuB = vcpu;
vcpu              699 virt/kvm/arm/vgic/vgic.c 			irq->vcpu = target_vcpu;
vcpu              719 virt/kvm/arm/vgic/vgic.c static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
vcpu              722 virt/kvm/arm/vgic/vgic.c 		vgic_v2_fold_lr_state(vcpu);
vcpu              724 virt/kvm/arm/vgic/vgic.c 		vgic_v3_fold_lr_state(vcpu);
vcpu              728 virt/kvm/arm/vgic/vgic.c static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
vcpu              734 virt/kvm/arm/vgic/vgic.c 		vgic_v2_populate_lr(vcpu, irq, lr);
vcpu              736 virt/kvm/arm/vgic/vgic.c 		vgic_v3_populate_lr(vcpu, irq, lr);
vcpu              739 virt/kvm/arm/vgic/vgic.c static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
vcpu              742 virt/kvm/arm/vgic/vgic.c 		vgic_v2_clear_lr(vcpu, lr);
vcpu              744 virt/kvm/arm/vgic/vgic.c 		vgic_v3_clear_lr(vcpu, lr);
vcpu              747 virt/kvm/arm/vgic/vgic.c static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
vcpu              750 virt/kvm/arm/vgic/vgic.c 		vgic_v2_set_underflow(vcpu);
vcpu              752 virt/kvm/arm/vgic/vgic.c 		vgic_v3_set_underflow(vcpu);
vcpu              756 virt/kvm/arm/vgic/vgic.c static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
vcpu              759 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              782 virt/kvm/arm/vgic/vgic.c static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
vcpu              784 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              792 virt/kvm/arm/vgic/vgic.c 	count = compute_ap_list_depth(vcpu, &multi_sgi);
vcpu              794 virt/kvm/arm/vgic/vgic.c 		vgic_sort_ap_list(vcpu);
vcpu              813 virt/kvm/arm/vgic/vgic.c 		if (likely(vgic_target_oracle(irq) == vcpu)) {
vcpu              814 virt/kvm/arm/vgic/vgic.c 			vgic_populate_lr(vcpu, irq, count++);
vcpu              825 virt/kvm/arm/vgic/vgic.c 				vgic_set_underflow(vcpu);
vcpu              830 virt/kvm/arm/vgic/vgic.c 	vcpu->arch.vgic_cpu.used_lrs = count;
vcpu              834 virt/kvm/arm/vgic/vgic.c 		vgic_clear_lr(vcpu, count);
vcpu              847 virt/kvm/arm/vgic/vgic.c static inline void vgic_save_state(struct kvm_vcpu *vcpu)
vcpu              850 virt/kvm/arm/vgic/vgic.c 		vgic_v2_save_state(vcpu);
vcpu              852 virt/kvm/arm/vgic/vgic.c 		__vgic_v3_save_state(vcpu);
vcpu              856 virt/kvm/arm/vgic/vgic.c void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
vcpu              858 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              860 virt/kvm/arm/vgic/vgic.c 	WARN_ON(vgic_v4_sync_hwstate(vcpu));
vcpu              863 virt/kvm/arm/vgic/vgic.c 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
vcpu              867 virt/kvm/arm/vgic/vgic.c 		vgic_save_state(vcpu);
vcpu              870 virt/kvm/arm/vgic/vgic.c 		vgic_fold_lr_state(vcpu);
vcpu              871 virt/kvm/arm/vgic/vgic.c 	vgic_prune_ap_list(vcpu);
vcpu              874 virt/kvm/arm/vgic/vgic.c static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
vcpu              877 virt/kvm/arm/vgic/vgic.c 		vgic_v2_restore_state(vcpu);
vcpu              879 virt/kvm/arm/vgic/vgic.c 		__vgic_v3_restore_state(vcpu);
vcpu              883 virt/kvm/arm/vgic/vgic.c void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
vcpu              885 virt/kvm/arm/vgic/vgic.c 	WARN_ON(vgic_v4_flush_hwstate(vcpu));
vcpu              899 virt/kvm/arm/vgic/vgic.c 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
vcpu              900 virt/kvm/arm/vgic/vgic.c 	    !vgic_supports_direct_msis(vcpu->kvm))
vcpu              905 virt/kvm/arm/vgic/vgic.c 	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
vcpu              906 virt/kvm/arm/vgic/vgic.c 		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
vcpu              907 virt/kvm/arm/vgic/vgic.c 		vgic_flush_lr_state(vcpu);
vcpu              908 virt/kvm/arm/vgic/vgic.c 		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
vcpu              912 virt/kvm/arm/vgic/vgic.c 		vgic_restore_state(vcpu);
vcpu              915 virt/kvm/arm/vgic/vgic.c void kvm_vgic_load(struct kvm_vcpu *vcpu)
vcpu              917 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!vgic_initialized(vcpu->kvm)))
vcpu              921 virt/kvm/arm/vgic/vgic.c 		vgic_v2_load(vcpu);
vcpu              923 virt/kvm/arm/vgic/vgic.c 		vgic_v3_load(vcpu);
vcpu              926 virt/kvm/arm/vgic/vgic.c void kvm_vgic_put(struct kvm_vcpu *vcpu)
vcpu              928 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!vgic_initialized(vcpu->kvm)))
vcpu              932 virt/kvm/arm/vgic/vgic.c 		vgic_v2_put(vcpu);
vcpu              934 virt/kvm/arm/vgic/vgic.c 		vgic_v3_put(vcpu);
vcpu              937 virt/kvm/arm/vgic/vgic.c void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
vcpu              939 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
vcpu              943 virt/kvm/arm/vgic/vgic.c 		vgic_v2_vmcr_sync(vcpu);
vcpu              945 virt/kvm/arm/vgic/vgic.c 		vgic_v3_vmcr_sync(vcpu);
vcpu              948 virt/kvm/arm/vgic/vgic.c int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
vcpu              950 virt/kvm/arm/vgic/vgic.c 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
vcpu              956 virt/kvm/arm/vgic/vgic.c 	if (!vcpu->kvm->arch.vgic.enabled)
vcpu              959 virt/kvm/arm/vgic/vgic.c 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
vcpu              962 virt/kvm/arm/vgic/vgic.c 	vgic_get_vmcr(vcpu, &vmcr);
vcpu              984 virt/kvm/arm/vgic/vgic.c 	struct kvm_vcpu *vcpu;
vcpu              991 virt/kvm/arm/vgic/vgic.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
vcpu              992 virt/kvm/arm/vgic/vgic.c 		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
vcpu              993 virt/kvm/arm/vgic/vgic.c 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
vcpu              994 virt/kvm/arm/vgic/vgic.c 			kvm_vcpu_kick(vcpu);
vcpu              999 virt/kvm/arm/vgic/vgic.c bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
vcpu             1005 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
vcpu             1008 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
vcpu             1012 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
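
Editor's note: the vcpuA/vcpuB lines from vgic_prune_ap_list() above show the deadlock-avoidance rule used when an interrupt migrates and two vCPUs' ap_list_lock must be held at once: always take the lock of the vCPU with the lower vcpu_id first. Below is a self-contained illustration of that ordering rule only; the struct, the pthread mutexes, and the helper names are stand-ins for the kernel's raw spinlocks and code.

	/* Lock-ordering sketch for the two-vCPU case in vgic_prune_ap_list():
	 * lock the lower vcpu_id first so concurrent migrations cannot
	 * deadlock. pthread mutexes stand in for raw spinlocks. */
	#include <pthread.h>
	#include <stdio.h>

	struct vcpu_model {
		int vcpu_id;
		pthread_mutex_t ap_list_lock;
	};

	static void lock_pair(struct vcpu_model *a, struct vcpu_model *b)
	{
		struct vcpu_model *first = (a->vcpu_id < b->vcpu_id) ? a : b;
		struct vcpu_model *second = (first == a) ? b : a;

		pthread_mutex_lock(&first->ap_list_lock);
		pthread_mutex_lock(&second->ap_list_lock);
	}

	static void unlock_pair(struct vcpu_model *a, struct vcpu_model *b)
	{
		pthread_mutex_unlock(&a->ap_list_lock);
		pthread_mutex_unlock(&b->ap_list_lock);
	}

	int main(void)
	{
		struct vcpu_model v0 = { 0, PTHREAD_MUTEX_INITIALIZER };
		struct vcpu_model v3 = { 3, PTHREAD_MUTEX_INITIALIZER };

		lock_pair(&v3, &v0);	/* still locks v0 before v3 */
		puts("migrated interrupt while holding both ap_list_locks");
		unlock_pair(&v3, &v0);
		return 0;
	}
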
vcpu              151 virt/kvm/arm/vgic/vgic.h 	struct kvm_vcpu *vcpu;
vcpu              160 virt/kvm/arm/vgic/vgic.h vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
vcpu              162 virt/kvm/arm/vgic/vgic.h struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
vcpu              176 virt/kvm/arm/vgic/vgic.h void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
vcpu              177 virt/kvm/arm/vgic/vgic.h void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
vcpu              178 virt/kvm/arm/vgic/vgic.h void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
vcpu              179 virt/kvm/arm/vgic/vgic.h void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
vcpu              180 virt/kvm/arm/vgic/vgic.h void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
vcpu              182 virt/kvm/arm/vgic/vgic.h int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              184 virt/kvm/arm/vgic/vgic.h int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              186 virt/kvm/arm/vgic/vgic.h void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              187 virt/kvm/arm/vgic/vgic.h void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              188 virt/kvm/arm/vgic/vgic.h void vgic_v2_enable(struct kvm_vcpu *vcpu);
vcpu              195 virt/kvm/arm/vgic/vgic.h void vgic_v2_load(struct kvm_vcpu *vcpu);
vcpu              196 virt/kvm/arm/vgic/vgic.h void vgic_v2_put(struct kvm_vcpu *vcpu);
vcpu              197 virt/kvm/arm/vgic/vgic.h void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
vcpu              199 virt/kvm/arm/vgic/vgic.h void vgic_v2_save_state(struct kvm_vcpu *vcpu);
vcpu              200 virt/kvm/arm/vgic/vgic.h void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
vcpu              210 virt/kvm/arm/vgic/vgic.h void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
vcpu              211 virt/kvm/arm/vgic/vgic.h void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
vcpu              212 virt/kvm/arm/vgic/vgic.h void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
vcpu              213 virt/kvm/arm/vgic/vgic.h void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
vcpu              214 virt/kvm/arm/vgic/vgic.h void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
vcpu              215 virt/kvm/arm/vgic/vgic.h void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              216 virt/kvm/arm/vgic/vgic.h void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              217 virt/kvm/arm/vgic/vgic.h void vgic_v3_enable(struct kvm_vcpu *vcpu);
vcpu              223 virt/kvm/arm/vgic/vgic.h int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
vcpu              226 virt/kvm/arm/vgic/vgic.h void vgic_v3_load(struct kvm_vcpu *vcpu);
vcpu              227 virt/kvm/arm/vgic/vgic.h void vgic_v3_put(struct kvm_vcpu *vcpu);
vcpu              228 virt/kvm/arm/vgic/vgic.h void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
vcpu              232 virt/kvm/arm/vgic/vgic.h void vgic_enable_lpis(struct kvm_vcpu *vcpu);
vcpu              233 virt/kvm/arm/vgic/vgic.h void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
vcpu              236 virt/kvm/arm/vgic/vgic.h int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              238 virt/kvm/arm/vgic/vgic.h int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              240 virt/kvm/arm/vgic/vgic.h int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              242 virt/kvm/arm/vgic/vgic.h int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
vcpu              244 virt/kvm/arm/vgic/vgic.h int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
vcpu              247 virt/kvm/arm/vgic/vgic.h void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              248 virt/kvm/arm/vgic/vgic.h void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
vcpu              258 virt/kvm/arm/vgic/vgic.h static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
vcpu              260 virt/kvm/arm/vgic/vgic.h 	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
vcpu              307 virt/kvm/arm/vgic/vgic.h int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
vcpu              319 virt/kvm/arm/vgic/vgic.h int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
vcpu              320 virt/kvm/arm/vgic/vgic.h int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
vcpu               20 virt/kvm/async_pf.c static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
vcpu               24 virt/kvm/async_pf.c 	kvm_arch_async_page_present(vcpu, work);
vcpu               27 virt/kvm/async_pf.c static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
vcpu               31 virt/kvm/async_pf.c 	kvm_arch_async_page_present(vcpu, work);
vcpu               53 virt/kvm/async_pf.c void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
vcpu               55 virt/kvm/async_pf.c 	INIT_LIST_HEAD(&vcpu->async_pf.done);
vcpu               56 virt/kvm/async_pf.c 	INIT_LIST_HEAD(&vcpu->async_pf.queue);
vcpu               57 virt/kvm/async_pf.c 	spin_lock_init(&vcpu->async_pf.lock);
vcpu               65 virt/kvm/async_pf.c 	struct kvm_vcpu *vcpu = apf->vcpu;
vcpu               83 virt/kvm/async_pf.c 	kvm_async_page_present_sync(vcpu, apf);
vcpu               85 virt/kvm/async_pf.c 	spin_lock(&vcpu->async_pf.lock);
vcpu               86 virt/kvm/async_pf.c 	list_add_tail(&apf->link, &vcpu->async_pf.done);
vcpu               87 virt/kvm/async_pf.c 	apf->vcpu = NULL;
vcpu               88 virt/kvm/async_pf.c 	spin_unlock(&vcpu->async_pf.lock);
vcpu               97 virt/kvm/async_pf.c 	if (swq_has_sleeper(&vcpu->wq))
vcpu               98 virt/kvm/async_pf.c 		swake_up_one(&vcpu->wq);
vcpu              101 virt/kvm/async_pf.c 	kvm_put_kvm(vcpu->kvm);
vcpu              104 virt/kvm/async_pf.c void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
vcpu              106 virt/kvm/async_pf.c 	spin_lock(&vcpu->async_pf.lock);
vcpu              109 virt/kvm/async_pf.c 	while (!list_empty(&vcpu->async_pf.queue)) {
vcpu              111 virt/kvm/async_pf.c 			list_first_entry(&vcpu->async_pf.queue,
vcpu              119 virt/kvm/async_pf.c 		if (!work->vcpu)
vcpu              122 virt/kvm/async_pf.c 		spin_unlock(&vcpu->async_pf.lock);
vcpu              128 virt/kvm/async_pf.c 			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
vcpu              132 virt/kvm/async_pf.c 		spin_lock(&vcpu->async_pf.lock);
vcpu              135 virt/kvm/async_pf.c 	while (!list_empty(&vcpu->async_pf.done)) {
vcpu              137 virt/kvm/async_pf.c 			list_first_entry(&vcpu->async_pf.done,
vcpu              142 virt/kvm/async_pf.c 	spin_unlock(&vcpu->async_pf.lock);
vcpu              144 virt/kvm/async_pf.c 	vcpu->async_pf.queued = 0;
vcpu              147 virt/kvm/async_pf.c void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
vcpu              151 virt/kvm/async_pf.c 	while (!list_empty_careful(&vcpu->async_pf.done) &&
vcpu              152 virt/kvm/async_pf.c 	      kvm_arch_can_inject_async_page_present(vcpu)) {
vcpu              153 virt/kvm/async_pf.c 		spin_lock(&vcpu->async_pf.lock);
vcpu              154 virt/kvm/async_pf.c 		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
vcpu              157 virt/kvm/async_pf.c 		spin_unlock(&vcpu->async_pf.lock);
vcpu              159 virt/kvm/async_pf.c 		kvm_arch_async_page_ready(vcpu, work);
vcpu              160 virt/kvm/async_pf.c 		kvm_async_page_present_async(vcpu, work);
vcpu              163 virt/kvm/async_pf.c 		vcpu->async_pf.queued--;
vcpu              168 virt/kvm/async_pf.c int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
vcpu              173 virt/kvm/async_pf.c 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
vcpu              187 virt/kvm/async_pf.c 	work->vcpu = vcpu;
vcpu              193 virt/kvm/async_pf.c 	kvm_get_kvm(work->vcpu->kvm);
vcpu              204 virt/kvm/async_pf.c 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
vcpu              205 virt/kvm/async_pf.c 	vcpu->async_pf.queued++;
vcpu              206 virt/kvm/async_pf.c 	kvm_arch_async_page_not_present(vcpu, work);
vcpu              209 virt/kvm/async_pf.c 	kvm_put_kvm(work->vcpu->kvm);
vcpu              215 virt/kvm/async_pf.c int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
vcpu              219 virt/kvm/async_pf.c 	if (!list_empty_careful(&vcpu->async_pf.done))
vcpu              229 virt/kvm/async_pf.c 	spin_lock(&vcpu->async_pf.lock);
vcpu              230 virt/kvm/async_pf.c 	list_add_tail(&work->link, &vcpu->async_pf.done);
vcpu              231 virt/kvm/async_pf.c 	spin_unlock(&vcpu->async_pf.lock);
vcpu              233 virt/kvm/async_pf.c 	vcpu->async_pf.queued++;
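
Editor's note: the async_pf.c entries trace the asynchronous page-fault lifecycle: kvm_setup_async_pf() queues a work item and bumps vcpu->async_pf.queued, the worker moves the item to the done list under async_pf.lock and wakes the vCPU, and kvm_check_async_pf_completion() drains the done list and decrements the counter. Below is a simplified, single-threaded model of the two lists and the counter; all names are hypothetical and there is no real faulting, workqueue, or locking.

	/* Toy model of the queued/done bookkeeping visible in async_pf.c
	 * above. The real code runs the work on a workqueue and protects
	 * the done list with vcpu->async_pf.lock; here it is sequential. */
	#include <stdio.h>
	#include <stdlib.h>

	struct apf_work {
		unsigned long gva;
		struct apf_work *next;
	};

	struct async_pf_state {
		struct apf_work *queue;	/* faults handed to the worker */
		struct apf_work *done;	/* faults the worker finished */
		int queued;
	};

	static void setup_async_pf(struct async_pf_state *s, unsigned long gva)
	{
		struct apf_work *w = calloc(1, sizeof(*w));

		w->gva = gva;
		w->next = s->queue;
		s->queue = w;
		s->queued++;		/* mirrors kvm_setup_async_pf(): queued++ */
	}

	static void worker_complete_all(struct async_pf_state *s)
	{
		while (s->queue) {	/* mirrors async_pf_execute(): move to done */
			struct apf_work *w = s->queue;

			s->queue = w->next;
			w->next = s->done;
			s->done = w;
		}
	}

	static void check_completion(struct async_pf_state *s)
	{
		while (s->done) {	/* mirrors kvm_check_async_pf_completion() */
			struct apf_work *w = s->done;

			s->done = w->next;
			printf("present gva 0x%lx to the guest\n", w->gva);
			free(w);
			s->queued--;
		}
	}

	int main(void)
	{
		struct async_pf_state s = { 0 };

		setup_async_pf(&s, 0x1000);
		setup_async_pf(&s, 0x2000);
		worker_complete_all(&s);
		check_completion(&s);
		printf("still queued: %d\n", s.queued);
		return 0;
	}
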
vcpu               17 virt/kvm/async_pf.h void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
vcpu               64 virt/kvm/coalesced_mmio.c static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
vcpu              729 virt/kvm/eventfd.c ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
vcpu              196 virt/kvm/kvm_main.c void vcpu_load(struct kvm_vcpu *vcpu)
vcpu              199 virt/kvm/kvm_main.c 	preempt_notifier_register(&vcpu->preempt_notifier);
vcpu              200 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
vcpu              205 virt/kvm/kvm_main.c void vcpu_put(struct kvm_vcpu *vcpu)
vcpu              208 virt/kvm/kvm_main.c 	kvm_arch_vcpu_put(vcpu);
vcpu              209 virt/kvm/kvm_main.c 	preempt_notifier_unregister(&vcpu->preempt_notifier);
vcpu              215 virt/kvm/kvm_main.c static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
vcpu              217 virt/kvm/kvm_main.c 	int mode = kvm_vcpu_exiting_guest_mode(vcpu);
vcpu              252 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu;
vcpu              257 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu              261 virt/kvm/kvm_main.c 		kvm_make_request(req, vcpu);
vcpu              262 virt/kvm/kvm_main.c 		cpu = vcpu->cpu;
vcpu              264 virt/kvm/kvm_main.c 		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
vcpu              268 virt/kvm/kvm_main.c 		    kvm_request_needs_ipi(vcpu, req))
vcpu              324 virt/kvm/kvm_main.c int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
vcpu              329 virt/kvm/kvm_main.c 	mutex_init(&vcpu->mutex);
vcpu              330 virt/kvm/kvm_main.c 	vcpu->cpu = -1;
vcpu              331 virt/kvm/kvm_main.c 	vcpu->kvm = kvm;
vcpu              332 virt/kvm/kvm_main.c 	vcpu->vcpu_id = id;
vcpu              333 virt/kvm/kvm_main.c 	vcpu->pid = NULL;
vcpu              334 virt/kvm/kvm_main.c 	init_swait_queue_head(&vcpu->wq);
vcpu              335 virt/kvm/kvm_main.c 	kvm_async_pf_vcpu_init(vcpu);
vcpu              337 virt/kvm/kvm_main.c 	vcpu->pre_pcpu = -1;
vcpu              338 virt/kvm/kvm_main.c 	INIT_LIST_HEAD(&vcpu->blocked_vcpu_list);
vcpu              345 virt/kvm/kvm_main.c 	vcpu->run = page_address(page);
vcpu              347 virt/kvm/kvm_main.c 	kvm_vcpu_set_in_spin_loop(vcpu, false);
vcpu              348 virt/kvm/kvm_main.c 	kvm_vcpu_set_dy_eligible(vcpu, false);
vcpu              349 virt/kvm/kvm_main.c 	vcpu->preempted = false;
vcpu              350 virt/kvm/kvm_main.c 	vcpu->ready = false;
vcpu              352 virt/kvm/kvm_main.c 	r = kvm_arch_vcpu_init(vcpu);
vcpu              358 virt/kvm/kvm_main.c 	free_page((unsigned long)vcpu->run);
vcpu              364 virt/kvm/kvm_main.c void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
vcpu              371 virt/kvm/kvm_main.c 	put_pid(rcu_dereference_protected(vcpu->pid, 1));
vcpu              372 virt/kvm/kvm_main.c 	kvm_arch_vcpu_uninit(vcpu);
vcpu              373 virt/kvm/kvm_main.c 	free_page((unsigned long)vcpu->run);
vcpu             1386 virt/kvm/kvm_main.c struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1388 virt/kvm/kvm_main.c 	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
vcpu             1403 virt/kvm/kvm_main.c unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1410 virt/kvm/kvm_main.c 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
vcpu             1466 virt/kvm/kvm_main.c unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1468 virt/kvm/kvm_main.c 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
vcpu             1498 virt/kvm/kvm_main.c unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
vcpu             1500 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             1760 virt/kvm/kvm_main.c kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1762 virt/kvm/kvm_main.c 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
vcpu             1772 virt/kvm/kvm_main.c kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1774 virt/kvm/kvm_main.c 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
vcpu             1898 virt/kvm/kvm_main.c int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
vcpu             1901 virt/kvm/kvm_main.c 	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
vcpu             1906 virt/kvm/kvm_main.c int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
vcpu             1908 virt/kvm/kvm_main.c 	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
vcpu             1949 virt/kvm/kvm_main.c int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
vcpu             1952 virt/kvm/kvm_main.c 	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
vcpu             1958 virt/kvm/kvm_main.c void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
vcpu             1960 virt/kvm/kvm_main.c 	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
vcpu             1965 virt/kvm/kvm_main.c struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             1969 virt/kvm/kvm_main.c 	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
vcpu             2061 virt/kvm/kvm_main.c int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
vcpu             2064 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             2090 virt/kvm/kvm_main.c int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
vcpu             2098 virt/kvm/kvm_main.c 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
vcpu             2138 virt/kvm/kvm_main.c int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
vcpu             2142 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             2174 virt/kvm/kvm_main.c int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
vcpu             2177 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             2204 virt/kvm/kvm_main.c int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
vcpu             2213 virt/kvm/kvm_main.c 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
vcpu             2377 virt/kvm/kvm_main.c void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
vcpu             2381 virt/kvm/kvm_main.c 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
vcpu             2386 virt/kvm/kvm_main.c void kvm_sigset_activate(struct kvm_vcpu *vcpu)
vcpu             2388 virt/kvm/kvm_main.c 	if (!vcpu->sigset_active)
vcpu             2397 virt/kvm/kvm_main.c 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
vcpu             2400 virt/kvm/kvm_main.c void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
vcpu             2402 virt/kvm/kvm_main.c 	if (!vcpu->sigset_active)
vcpu             2409 virt/kvm/kvm_main.c static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
vcpu             2413 virt/kvm/kvm_main.c 	old = val = vcpu->halt_poll_ns;
vcpu             2426 virt/kvm/kvm_main.c 	vcpu->halt_poll_ns = val;
vcpu             2428 virt/kvm/kvm_main.c 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
vcpu             2431 virt/kvm/kvm_main.c static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
vcpu             2435 virt/kvm/kvm_main.c 	old = val = vcpu->halt_poll_ns;
vcpu             2442 virt/kvm/kvm_main.c 	vcpu->halt_poll_ns = val;
vcpu             2443 virt/kvm/kvm_main.c 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
vcpu             2446 virt/kvm/kvm_main.c static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
vcpu             2449 virt/kvm/kvm_main.c 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
vcpu             2451 virt/kvm/kvm_main.c 	if (kvm_arch_vcpu_runnable(vcpu)) {
vcpu             2452 virt/kvm/kvm_main.c 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
vcpu             2455 virt/kvm/kvm_main.c 	if (kvm_cpu_has_pending_timer(vcpu))
vcpu             2462 virt/kvm/kvm_main.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu             2469 virt/kvm/kvm_main.c void kvm_vcpu_block(struct kvm_vcpu *vcpu)
vcpu             2476 virt/kvm/kvm_main.c 	kvm_arch_vcpu_blocking(vcpu);
vcpu             2479 virt/kvm/kvm_main.c 	if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
vcpu             2480 virt/kvm/kvm_main.c 		ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
vcpu             2482 virt/kvm/kvm_main.c 		++vcpu->stat.halt_attempted_poll;
vcpu             2488 virt/kvm/kvm_main.c 			if (kvm_vcpu_check_block(vcpu) < 0) {
vcpu             2489 virt/kvm/kvm_main.c 				++vcpu->stat.halt_successful_poll;
vcpu             2490 virt/kvm/kvm_main.c 				if (!vcpu_valid_wakeup(vcpu))
vcpu             2491 virt/kvm/kvm_main.c 					++vcpu->stat.halt_poll_invalid;
vcpu             2499 virt/kvm/kvm_main.c 		prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
vcpu             2501 virt/kvm/kvm_main.c 		if (kvm_vcpu_check_block(vcpu) < 0)
vcpu             2508 virt/kvm/kvm_main.c 	finish_swait(&vcpu->wq, &wait);
vcpu             2511 virt/kvm/kvm_main.c 	kvm_arch_vcpu_unblocking(vcpu);
vcpu             2514 virt/kvm/kvm_main.c 	if (!kvm_arch_no_poll(vcpu)) {
vcpu             2515 virt/kvm/kvm_main.c 		if (!vcpu_valid_wakeup(vcpu)) {
vcpu             2516 virt/kvm/kvm_main.c 			shrink_halt_poll_ns(vcpu);
vcpu             2518 virt/kvm/kvm_main.c 			if (block_ns <= vcpu->halt_poll_ns)
vcpu             2521 virt/kvm/kvm_main.c 			else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
vcpu             2522 virt/kvm/kvm_main.c 				shrink_halt_poll_ns(vcpu);
vcpu             2524 virt/kvm/kvm_main.c 			else if (vcpu->halt_poll_ns < halt_poll_ns &&
vcpu             2526 virt/kvm/kvm_main.c 				grow_halt_poll_ns(vcpu);
vcpu             2528 virt/kvm/kvm_main.c 			vcpu->halt_poll_ns = 0;
vcpu             2532 virt/kvm/kvm_main.c 	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
vcpu             2533 virt/kvm/kvm_main.c 	kvm_arch_vcpu_block_finish(vcpu);
vcpu             2537 virt/kvm/kvm_main.c bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
vcpu             2541 virt/kvm/kvm_main.c 	wqp = kvm_arch_vcpu_wq(vcpu);
vcpu             2544 virt/kvm/kvm_main.c 		WRITE_ONCE(vcpu->ready, true);
vcpu             2545 virt/kvm/kvm_main.c 		++vcpu->stat.halt_wakeup;
vcpu             2557 virt/kvm/kvm_main.c void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
vcpu             2560 virt/kvm/kvm_main.c 	int cpu = vcpu->cpu;
vcpu             2562 virt/kvm/kvm_main.c 	if (kvm_vcpu_wake_up(vcpu))
vcpu             2567 virt/kvm/kvm_main.c 		if (kvm_arch_vcpu_should_kick(vcpu))
vcpu             2616 virt/kvm/kvm_main.c static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
vcpu             2621 virt/kvm/kvm_main.c 	eligible = !vcpu->spin_loop.in_spin_loop ||
vcpu             2622 virt/kvm/kvm_main.c 		    vcpu->spin_loop.dy_eligible;
vcpu             2624 virt/kvm/kvm_main.c 	if (vcpu->spin_loop.in_spin_loop)
vcpu             2625 virt/kvm/kvm_main.c 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
vcpu             2638 virt/kvm/kvm_main.c bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
vcpu             2640 virt/kvm/kvm_main.c 	return kvm_arch_vcpu_runnable(vcpu);
vcpu             2643 virt/kvm/kvm_main.c static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
vcpu             2645 virt/kvm/kvm_main.c 	if (kvm_arch_dy_runnable(vcpu))
vcpu             2649 virt/kvm/kvm_main.c 	if (!list_empty_careful(&vcpu->async_pf.done))
vcpu             2659 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu;
vcpu             2675 virt/kvm/kvm_main.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu             2681 virt/kvm/kvm_main.c 			if (!READ_ONCE(vcpu->ready))
vcpu             2683 virt/kvm/kvm_main.c 			if (vcpu == me)
vcpu             2685 virt/kvm/kvm_main.c 			if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
vcpu             2687 virt/kvm/kvm_main.c 			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
vcpu             2688 virt/kvm/kvm_main.c 				!kvm_arch_vcpu_in_kernel(vcpu))
vcpu             2690 virt/kvm/kvm_main.c 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
vcpu             2693 virt/kvm/kvm_main.c 			yielded = kvm_vcpu_yield_to(vcpu);
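
The filters in kvm_vcpu_on_spin() walk the vCPUs round-robin from the last boosted one and skip anything that is not ready, is the spinning vCPU itself, is idle with no pending work, was preempted in user mode when only kernel-mode targets are wanted, or is ineligible for a directed yield. A toy model of that selection follows; the struct layout and helper names are assumptions made for the model.

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_VCPUS 4

	struct model_vcpu {
		int id;
		bool ready;
		bool halted;		/* stands in for swait_active(&vcpu->wq) */
		bool has_work;		/* stands in for vcpu_dy_runnable() */
		bool preempted;
		bool in_kernel;
		bool dy_eligible;	/* stands in for the directed-yield heuristic */
	};

	static int pick_yield_target(struct model_vcpu *vcpus, int nr, int me,
				     int last_boosted, bool yield_to_kernel_mode)
	{
		for (int n = 0; n < nr; n++) {
			int i = (last_boosted + 1 + n) % nr;
			struct model_vcpu *v = &vcpus[i];

			if (!v->ready)
				continue;
			if (i == me)
				continue;
			if (v->halted && !v->has_work)
				continue;
			if (v->preempted && yield_to_kernel_mode && !v->in_kernel)
				continue;
			if (!v->dy_eligible)
				continue;
			return i;	/* the vcpu we would yield_to() */
		}
		return -1;
	}

	int main(void)
	{
		struct model_vcpu vcpus[NR_VCPUS] = {
			{ .id = 0, .ready = true, .in_kernel = true, .dy_eligible = true },
			{ .id = 1, .ready = true, .halted = true },		/* idle */
			{ .id = 2, .ready = true, .preempted = true,
			  .in_kernel = true, .dy_eligible = true },
			{ .id = 3, .ready = false },				/* not ready */
		};
		int target = pick_yield_target(vcpus, NR_VCPUS, 0, 0, true);

		printf("spinning vcpu 0 would boost vcpu %d\n", target);
		return 0;
	}
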
vcpu             2713 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
vcpu             2717 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->run);
vcpu             2720 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->arch.pio_data);
vcpu             2724 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
vcpu             2727 virt/kvm/kvm_main.c 		return kvm_arch_vcpu_fault(vcpu, vmf);
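
The offsets handled by the vcpu fault handler are what userspace sees when it mmap()s the vcpu fd: page 0 is the shared struct kvm_run (vcpu->run above), with the pio data and coalesced MMIO ring at later page offsets. A small sketch of the mapping step, assuming kvm_fd and vcpu_fd were obtained as shown after the kvm_vm_ioctl_create_vcpu() listing further below:

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	/* Map the shared run area of an existing vcpu fd; returns NULL on error. */
	static struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
	{
		long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
		if (size < 0) {
			perror("KVM_GET_VCPU_MMAP_SIZE");
			return NULL;
		}

		/* offset 0 of the vcpu fd is vcpu->run in the fault handler above */
		struct kvm_run *run = mmap(NULL, size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, vcpu_fd, 0);
		return run == MAP_FAILED ? NULL : run;
	}
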
vcpu             2745 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             2747 virt/kvm/kvm_main.c 	debugfs_remove_recursive(vcpu->debugfs_dentry);
vcpu             2748 virt/kvm/kvm_main.c 	kvm_put_kvm(vcpu->kvm);
vcpu             2763 virt/kvm/kvm_main.c static int create_vcpu_fd(struct kvm_vcpu *vcpu)
vcpu             2767 virt/kvm/kvm_main.c 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
vcpu             2768 virt/kvm/kvm_main.c 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
vcpu             2771 virt/kvm/kvm_main.c static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
vcpu             2779 virt/kvm/kvm_main.c 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
vcpu             2780 virt/kvm/kvm_main.c 	vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
vcpu             2781 virt/kvm/kvm_main.c 						  vcpu->kvm->debugfs_dentry);
vcpu             2783 virt/kvm/kvm_main.c 	kvm_arch_create_vcpu_debugfs(vcpu);
vcpu             2793 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu;
vcpu             2807 virt/kvm/kvm_main.c 	vcpu = kvm_arch_vcpu_create(kvm, id);
vcpu             2808 virt/kvm/kvm_main.c 	if (IS_ERR(vcpu)) {
vcpu             2809 virt/kvm/kvm_main.c 		r = PTR_ERR(vcpu);
vcpu             2813 virt/kvm/kvm_main.c 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
vcpu             2815 virt/kvm/kvm_main.c 	r = kvm_arch_vcpu_setup(vcpu);
vcpu             2819 virt/kvm/kvm_main.c 	kvm_create_vcpu_debugfs(vcpu);
vcpu             2831 virt/kvm/kvm_main.c 	r = create_vcpu_fd(vcpu);
vcpu             2837 virt/kvm/kvm_main.c 	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
vcpu             2847 virt/kvm/kvm_main.c 	kvm_arch_vcpu_postcreate(vcpu);
vcpu             2852 virt/kvm/kvm_main.c 	debugfs_remove_recursive(vcpu->debugfs_dentry);
vcpu             2854 virt/kvm/kvm_main.c 	kvm_arch_vcpu_destroy(vcpu);
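
kvm_vm_ioctl_create_vcpu() is reached from the KVM_CREATE_VCPU ioctl on a VM fd. A minimal userspace sketch of that path, using only the documented /dev/kvm ioctls, with error handling reduced to perror and an early return:

	#include <fcntl.h>
	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
		if (kvm < 0) { perror("open /dev/kvm"); return 1; }

		if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
			fprintf(stderr, "unexpected KVM API version\n");
			return 1;
		}

		int vm = ioctl(kvm, KVM_CREATE_VM, 0);		/* VM fd */
		if (vm < 0) { perror("KVM_CREATE_VM"); return 1; }

		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu id 0 -> vcpu fd */
		if (vcpu < 0) { perror("KVM_CREATE_VCPU"); return 1; }

		printf("created vcpu fd %d\n", vcpu);
		close(vcpu);
		close(vm);
		close(kvm);
		return 0;
	}
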
vcpu             2862 virt/kvm/kvm_main.c static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
vcpu             2866 virt/kvm/kvm_main.c 		vcpu->sigset_active = 1;
vcpu             2867 virt/kvm/kvm_main.c 		vcpu->sigset = *sigset;
vcpu             2869 virt/kvm/kvm_main.c 		vcpu->sigset_active = 0;
vcpu             2876 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             2882 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
vcpu             2896 virt/kvm/kvm_main.c 	if (mutex_lock_killable(&vcpu->mutex))
vcpu             2904 virt/kvm/kvm_main.c 		oldpid = rcu_access_pointer(vcpu->pid);
vcpu             2909 virt/kvm/kvm_main.c 			r = kvm_arch_vcpu_run_pid_change(vcpu);
vcpu             2914 virt/kvm/kvm_main.c 			rcu_assign_pointer(vcpu->pid, newpid);
vcpu             2919 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
vcpu             2920 virt/kvm/kvm_main.c 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
vcpu             2930 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
vcpu             2950 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
vcpu             2960 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
vcpu             2976 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
vcpu             2982 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
vcpu             2997 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
vcpu             3006 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
vcpu             3021 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
vcpu             3044 virt/kvm/kvm_main.c 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
vcpu             3052 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
vcpu             3068 virt/kvm/kvm_main.c 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
vcpu             3075 virt/kvm/kvm_main.c 	mutex_unlock(&vcpu->mutex);
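
The KVM_RUN branch of kvm_vcpu_ioctl() is what a userspace run loop sits on: it re-enters the guest until kvm_arch_vcpu_ioctl_run() returns, then reports the reason in the shared run structure. A sketch of such a loop follows; the helper name run_vcpu() and the fd plumbing are assumptions, while the ioctl and exit-reason constants are the documented API.

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	/* Drive one vcpu fd until the guest halts or an unhandled exit occurs. */
	static int run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
				perror("KVM_RUN");
				return -1;
			}

			switch (run->exit_reason) {
			case KVM_EXIT_HLT:
				return 0;			/* guest executed HLT */
			case KVM_EXIT_IO:
				printf("port I/O: port 0x%x, %s\n", run->io.port,
				       run->io.direction == KVM_EXIT_IO_OUT ? "out" : "in");
				break;				/* emulate, then re-enter */
			case KVM_EXIT_MMIO:
				printf("MMIO at 0x%llx\n",
				       (unsigned long long)run->mmio.phys_addr);
				break;
			default:
				fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
				return -1;
			}
		}
	}
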
vcpu             3085 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = filp->private_data;
vcpu             3089 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
vcpu             3109 virt/kvm/kvm_main.c 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
vcpu             3111 virt/kvm/kvm_main.c 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
vcpu             3854 virt/kvm/kvm_main.c static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
vcpu             3865 virt/kvm/kvm_main.c 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
vcpu             3875 virt/kvm/kvm_main.c int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
vcpu             3887 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
vcpu             3890 virt/kvm/kvm_main.c 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
vcpu             3896 virt/kvm/kvm_main.c int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
vcpu             3907 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
vcpu             3914 virt/kvm/kvm_main.c 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
vcpu             3922 virt/kvm/kvm_main.c 	return __kvm_io_bus_write(vcpu, bus, &range, val);
vcpu             3925 virt/kvm/kvm_main.c static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
vcpu             3936 virt/kvm/kvm_main.c 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
vcpu             3946 virt/kvm/kvm_main.c int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
vcpu             3958 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
vcpu             3961 virt/kvm/kvm_main.c 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
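
__kvm_io_bus_write() and __kvm_io_bus_read() offer the access to each registered device whose (addr, len) range covers it until one accepts, and return an error if none does. A toy model of that dispatch, with invented device and callback names:

	#include <stdint.h>
	#include <stdio.h>

	struct model_dev {
		const char *name;
		uint64_t addr;
		int len;
		/* returns 0 if the device handled the access, nonzero otherwise */
		int (*write)(struct model_dev *dev, uint64_t addr, int len, const void *val);
	};

	static int log_write(struct model_dev *dev, uint64_t addr, int len, const void *val)
	{
		printf("%s: write of %d byte(s) at 0x%llx\n", dev->name,
		       len, (unsigned long long)addr);
		return 0;
	}

	static int bus_write(struct model_dev *devs, int nr_devs,
			     uint64_t addr, int len, const void *val)
	{
		for (int i = 0; i < nr_devs; i++) {
			struct model_dev *dev = &devs[i];

			if (addr < dev->addr || addr + len > dev->addr + dev->len)
				continue;		/* access not inside this range */
			if (!dev->write(dev, addr, len, val))
				return 0;		/* first device to accept wins */
		}
		return -1;				/* -EOPNOTSUPP in the kernel */
	}

	int main(void)
	{
		struct model_dev devs[] = {
			{ .name = "uart",   .addr = 0x3f8,      .len = 8,     .write = log_write },
			{ .name = "ioapic", .addr = 0xfec00000, .len = 0x100, .write = log_write },
		};
		uint8_t byte = 0x41;

		if (bus_write(devs, 2, 0x3f8, 1, &byte))
			printf("no device claimed the access\n");
		return 0;
	}
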
vcpu             4149 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu;
vcpu             4153 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
vcpu             4154 virt/kvm/kvm_main.c 		*val += *(u64 *)((void *)vcpu + stat_data->offset);
vcpu             4163 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu;
vcpu             4168 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
vcpu             4169 virt/kvm/kvm_main.c 		*(u64 *)((void *)vcpu + stat_data->offset) = 0;
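
The per-VM stat helpers above identify a counter only by its byte offset inside struct kvm_vcpu and sum (or clear) that field across all vCPUs. A short model of the same offset-based aggregation, with invented names:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct model_vcpu {
		int id;
		uint64_t halt_wakeup;
		uint64_t halt_attempted_poll;
	};

	/* Sum one u64 counter, located by byte offset, across all vcpus. */
	static uint64_t sum_stat(struct model_vcpu *vcpus, int nr, size_t offset)
	{
		uint64_t val = 0;

		for (int i = 0; i < nr; i++)
			val += *(uint64_t *)((char *)&vcpus[i] + offset);
		return val;
	}

	int main(void)
	{
		struct model_vcpu vcpus[2] = {
			{ .id = 0, .halt_wakeup = 3, .halt_attempted_poll = 10 },
			{ .id = 1, .halt_wakeup = 5, .halt_attempted_poll = 7 },
		};

		printf("halt_wakeup total: %llu\n",
		       (unsigned long long)sum_stat(vcpus, 2,
						    offsetof(struct model_vcpu, halt_wakeup)));
		return 0;
	}
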
vcpu             4374 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
vcpu             4376 virt/kvm/kvm_main.c 	WRITE_ONCE(vcpu->preempted, false);
vcpu             4377 virt/kvm/kvm_main.c 	WRITE_ONCE(vcpu->ready, false);
vcpu             4379 virt/kvm/kvm_main.c 	kvm_arch_sched_in(vcpu, cpu);
vcpu             4381 virt/kvm/kvm_main.c 	kvm_arch_vcpu_load(vcpu, cpu);
vcpu             4387 virt/kvm/kvm_main.c 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
vcpu             4390 virt/kvm/kvm_main.c 		WRITE_ONCE(vcpu->preempted, true);
vcpu             4391 virt/kvm/kvm_main.c 		WRITE_ONCE(vcpu->ready, true);
vcpu             4393 virt/kvm/kvm_main.c 	kvm_arch_vcpu_put(vcpu);
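
The sched-in/sched-out callbacks pair kvm_arch_vcpu_load() with kvm_arch_vcpu_put() and use the preempted/ready flags to tell the directed-yield code that a runnable vCPU was scheduled out. A trivial model of that bookkeeping, with invented names:

	#include <stdbool.h>
	#include <stdio.h>

	struct model_vcpu {
		int id;
		bool preempted;
		bool ready;
	};

	static void model_arch_vcpu_load(struct model_vcpu *v, int cpu)
	{
		printf("vcpu %d: state loaded on cpu %d\n", v->id, cpu);
	}

	static void model_arch_vcpu_put(struct model_vcpu *v)
	{
		printf("vcpu %d: state saved\n", v->id);
	}

	static void model_sched_in(struct model_vcpu *v, int cpu)
	{
		v->preempted = false;
		v->ready = false;
		model_arch_vcpu_load(v, cpu);
	}

	static void model_sched_out(struct model_vcpu *v, bool was_runnable)
	{
		if (was_runnable) {
			v->preempted = true;
			v->ready = true;	/* a spinning sibling may now boost us */
		}
		model_arch_vcpu_put(v);
	}

	int main(void)
	{
		struct model_vcpu v = { .id = 0 };

		model_sched_in(&v, 2);
		model_sched_out(&v, true);
		printf("vcpu %d: preempted=%d ready=%d\n", v.id, v.preempted, v.ready);
		return 0;
	}
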