/linux-4.4.14/arch/arm64/include/asm/ |
D | kvm_emulate.h |
34   unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
35   unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
37   bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
38   void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
40   void kvm_inject_undefined(struct kvm_vcpu *vcpu);
41   void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
42   void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
44   static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) in vcpu_reset_hcr() argument
46   vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; in vcpu_reset_hcr()
47   if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) in vcpu_reset_hcr()
[all …]
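
The vcpu_reset_hcr() excerpt above shows KVM/arm64 deriving a vcpu's EL2 configuration register at reset: start from a 64-bit-guest baseline, then adjust for a 32-bit EL1 guest. A minimal, self-contained sketch of that pattern, assuming placeholder flag values (HCR_GUEST_FLAGS and HCR_RW below are illustrative, not the kernel's definitions):

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_HCR_RW          (1ULL << 31)                  /* placeholder bit */
    #define TOY_HCR_GUEST_FLAGS (TOY_HCR_RW | 0x1ULL)         /* placeholder baseline */

    struct toy_vcpu { uint64_t hcr_el2; bool el1_is_32bit; };

    /* Reset to the 64-bit-guest baseline, then clear the RW bit if
     * userspace configured a 32-bit EL1 guest (KVM_ARM_VCPU_EL1_32BIT). */
    static void toy_reset_hcr(struct toy_vcpu *vcpu)
    {
        vcpu->hcr_el2 = TOY_HCR_GUEST_FLAGS;
        if (vcpu->el1_is_32bit)
            vcpu->hcr_el2 &= ~TOY_HCR_RW;
    }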
|
D | kvm_coproc.h |
26   void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
41   int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42   int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
43   int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
44   int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
45   int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
46   int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
52   int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
53   int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
54   int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
[all …]
|
D | kvm_host.h |
46   int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
203  unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
204  int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
205  int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
206  int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
229  int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
253  static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} in kvm_arch_vcpu_uninit() argument
254  static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} in kvm_arch_sched_in() argument
257  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
258  void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
[all …]
|
D | kvm_mmu.h |
94   int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
96   void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
228  static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
230  return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
233  static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
239  if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
271  void kvm_set_way_flush(struct kvm_vcpu *vcpu);
272  void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
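
The 0b101 mask in vcpu_has_cache_enabled() tests two SCTLR_EL1 bits in one comparison: M (bit 0, stage-1 MMU enable) and C (bit 2, data cache enable); both must be set for the guest to count as running with caches on. The check in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    static bool toy_caches_enabled(uint64_t sctlr_el1)
    {
        /* bit 0 = M (MMU enable), bit 2 = C (data cache enable) */
        return (sctlr_el1 & 0b101) == 0b101;
    }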
|
D | kvm_psci.h | 24 int kvm_psci_version(struct kvm_vcpu *vcpu); 25 int kvm_psci_call(struct kvm_vcpu *vcpu);
|
D | kvm_mmio.h | 34 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); 35 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
D | kvm_asm.h | 121 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
/linux-4.4.14/arch/s390/kvm/ |
D | priv.c |
34   static int handle_set_clock(struct kvm_vcpu *vcpu) in handle_set_clock() argument
40   if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
41   return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_clock()
43   op2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_clock()
45   return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_clock()
46   rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); in handle_set_clock()
48   return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_clock()
50   VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); in handle_set_clock()
51   kvm_s390_set_tod_clock(vcpu->kvm, val); in handle_set_clock()
53   kvm_s390_set_psw_cc(vcpu, 0); in handle_set_clock()
[all …]
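
handle_set_clock() opens with the gate every s390 privileged-instruction handler shares: if the guest PSW is in problem state (user mode), the intercept is answered with a privileged-operation program interrupt rather than emulated. A sketch of just that gate; the PSW bit position here is a placeholder, not the architectural value:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_PSW_MASK_PSTATE (1ULL << 44)   /* placeholder bit position */

    /* true: refuse emulation and inject PGM_PRIVILEGED_OP instead */
    static bool toy_must_reject_priv_op(uint64_t psw_mask)
    {
        return (psw_mask & TOY_PSW_MASK_PSTATE) != 0;
    }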
|
D | diag.c |
23   static int diag_release_pages(struct kvm_vcpu *vcpu) in diag_release_pages() argument
26   unsigned long prefix = kvm_s390_get_prefix(vcpu); in diag_release_pages()
28   start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
29   end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; in diag_release_pages()
30   vcpu->stat.diagnose_10++; in diag_release_pages()
34   return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in diag_release_pages()
36   VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); in diag_release_pages()
43   gmap_discard(vcpu->arch.gmap, start, end); in diag_release_pages()
51   gmap_discard(vcpu->arch.gmap, start, prefix); in diag_release_pages()
53   gmap_discard(vcpu->arch.gmap, 0, 4096); in diag_release_pages()
[all …]
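
diag_release_pages() recovers its operand registers from the intercepted instruction itself: the SIE block's IPA field carries the two 4-bit GPR numbers in its low byte, and the end address is bumped by one page to make the range exclusive. A hedged model of the decode (struct and field names are illustrative):

    #include <stdint.h>

    struct toy_gprs { uint64_t gprs[16]; };

    static void toy_decode_diag10(uint16_t ipa, const struct toy_gprs *r,
                                  uint64_t *start, uint64_t *end)
    {
        *start = r->gprs[(ipa & 0xf0) >> 4];   /* r1: first page of the range */
        *end   = r->gprs[ipa & 0x0f] + 4096;   /* r2: last page, made exclusive */
    }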
|
D | intercept.c |
41   void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc) in kvm_s390_rewind_psw() argument
43   struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; in kvm_s390_rewind_psw()
54   static int handle_noop(struct kvm_vcpu *vcpu) in handle_noop() argument
56   switch (vcpu->arch.sie_block->icptcode) { in handle_noop()
58   vcpu->stat.exit_null++; in handle_noop()
61   vcpu->stat.exit_external_request++; in handle_noop()
69   static int handle_stop(struct kvm_vcpu *vcpu) in handle_stop() argument
71   struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; in handle_stop()
75   vcpu->stat.exit_stop_request++; in handle_stop()
78   if (kvm_s390_vcpu_has_irq(vcpu, 1)) in handle_stop()
[all …]
|
D | kvm-s390.h |
23   typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
26   #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) argument
28   #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) argument
51   static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) in is_vcpu_stopped() argument
53   return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; in is_vcpu_stopped()
68   static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu) in kvm_s390_get_prefix() argument
70   return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT; in kvm_s390_get_prefix()
73   static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) in kvm_s390_set_prefix() argument
75   VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id, in kvm_s390_set_prefix()
77   vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; in kvm_s390_set_prefix()
[all …]
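
kvm_s390_get_prefix()/kvm_s390_set_prefix() show that the SIE block stores the guest prefix address pre-shifted: reads shift it back up, writes shift it down. Modeled standalone; the shift amount below is an assumption for illustration, not necessarily the kernel's GUEST_PREFIX_SHIFT:

    #include <stdint.h>

    #define TOY_GUEST_PREFIX_SHIFT 13   /* assumed value */

    static uint32_t toy_get_prefix(uint32_t sie_prefix_field)
    {
        return sie_prefix_field << TOY_GUEST_PREFIX_SHIFT;   /* widen to an address */
    }

    static uint32_t toy_encode_prefix(uint32_t prefix)
    {
        return prefix >> TOY_GUEST_PREFIX_SHIFT;             /* pack for the SIE block */
    }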
|
D | guestdbg.c |
62   static void enable_all_hw_bp(struct kvm_vcpu *vcpu) in enable_all_hw_bp() argument
65   u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_bp()
66   u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_bp()
67   u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_bp()
70   if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || in enable_all_hw_bp()
71   vcpu->arch.guestdbg.hw_bp_info == NULL) in enable_all_hw_bp()
82   for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { in enable_all_hw_bp()
83   start = vcpu->arch.guestdbg.hw_bp_info[i].addr; in enable_all_hw_bp()
84   len = vcpu->arch.guestdbg.hw_bp_info[i].len; in enable_all_hw_bp()
102  static void enable_all_hw_wp(struct kvm_vcpu *vcpu) in enable_all_hw_wp() argument
[all …]
|
D | kvm-s390.c |
153  struct kvm_vcpu *vcpu; in kvm_clock_sync() local
159  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
160  vcpu->arch.sie_block->epoch -= *delta; in kvm_clock_sync()
460  static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
464  struct kvm_vcpu *vcpu; in kvm_s390_vm_set_crypto() local
503  kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
504  kvm_s390_vcpu_crypto_setup(vcpu); in kvm_s390_vm_set_crypto()
505  exit_sie(vcpu); in kvm_s390_vm_set_crypto()
1185 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1187 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); in kvm_arch_vcpu_destroy()
[all …]
|
D | sigp.c |
23   static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, in __sigp_sense() argument
46   VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id, in __sigp_sense()
51   static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, in __inject_sigp_emergency() argument
56   .u.emerg.code = vcpu->vcpu_id, in __inject_sigp_emergency()
62   VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", in __inject_sigp_emergency()
68   static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) in __sigp_emergency() argument
70   return __inject_sigp_emergency(vcpu, dst_vcpu); in __sigp_emergency()
73   static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, in __sigp_conditional_emergency() argument
92   return __inject_sigp_emergency(vcpu, dst_vcpu); in __sigp_conditional_emergency()
100  static int __sigp_external_call(struct kvm_vcpu *vcpu, in __sigp_external_call() argument
[all …]
|
D | interrupt.c |
37   int psw_extint_disabled(struct kvm_vcpu *vcpu) in psw_extint_disabled() argument
39   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); in psw_extint_disabled()
42   static int psw_ioint_disabled(struct kvm_vcpu *vcpu) in psw_ioint_disabled() argument
44   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); in psw_ioint_disabled()
47   static int psw_mchk_disabled(struct kvm_vcpu *vcpu) in psw_mchk_disabled() argument
49   return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); in psw_mchk_disabled()
52   static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) in psw_interrupts_disabled() argument
54   return psw_extint_disabled(vcpu) && in psw_interrupts_disabled()
55   psw_ioint_disabled(vcpu) && in psw_interrupts_disabled()
56   psw_mchk_disabled(vcpu); in psw_interrupts_disabled()
[all …]
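
psw_interrupts_disabled() is simply the conjunction of the three per-class PSW mask tests above. Since each test looks at one mask bit, the conjunction collapses to a single masked compare; a standalone equivalent with placeholder bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_PSW_MASK_EXT    (1ULL << 56)   /* placeholder positions */
    #define TOY_PSW_MASK_IO     (1ULL << 57)
    #define TOY_PSW_MASK_MCHECK (1ULL << 58)

    /* all three interrupt classes disabled <=> none of the enable bits set */
    static bool toy_interrupts_disabled(uint64_t psw_mask)
    {
        return !(psw_mask &
                 (TOY_PSW_MASK_EXT | TOY_PSW_MASK_IO | TOY_PSW_MASK_MCHECK));
    }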
|
D | gaccess.h |
30   static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, in kvm_s390_real_to_abs() argument
33   unsigned long prefix = kvm_s390_get_prefix(vcpu); in kvm_s390_real_to_abs()
55   static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, in kvm_s390_logical_to_effective() argument
58   psw_t *psw = &vcpu->arch.sie_block->gpsw; in kvm_s390_logical_to_effective()
95   #define put_guest_lc(vcpu, x, gra) \ argument
97   struct kvm_vcpu *__vcpu = (vcpu); \
124  int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, in write_guest_lc() argument
127  unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); in write_guest_lc()
129  return kvm_write_guest(vcpu->kvm, gpa, data, len); in write_guest_lc()
150  int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, in read_guest_lc() argument
[all …]
|
D | gaccess.c |
260  int ipte_lock_held(struct kvm_vcpu *vcpu) in ipte_lock_held() argument
262  union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control; in ipte_lock_held()
264  if (vcpu->arch.sie_block->eca & 1) in ipte_lock_held()
266  return vcpu->kvm->arch.ipte_lock_count != 0; in ipte_lock_held()
269  static void ipte_lock_simple(struct kvm_vcpu *vcpu) in ipte_lock_simple() argument
273  mutex_lock(&vcpu->kvm->arch.ipte_mutex); in ipte_lock_simple()
274  vcpu->kvm->arch.ipte_lock_count++; in ipte_lock_simple()
275  if (vcpu->kvm->arch.ipte_lock_count > 1) in ipte_lock_simple()
277  ic = &vcpu->kvm->arch.sca->ipte_control; in ipte_lock_simple()
288  mutex_unlock(&vcpu->kvm->arch.ipte_mutex); in ipte_lock_simple()
[all …]
|
D | trace-s390.h |
42   TP_PROTO(unsigned int id, struct kvm_vcpu *vcpu,
44   TP_ARGS(id, vcpu, sie_block),
48   __field(struct kvm_vcpu *, vcpu)
54   __entry->vcpu = vcpu;
59   __entry->vcpu, __entry->sie_block)
|
D | trace.h |
19   #define VCPU_PROTO_COMMON struct kvm_vcpu *vcpu
20   #define VCPU_ARGS_COMMON vcpu
25   __entry->id = vcpu->vcpu_id; \
26   __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
27   __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
|
/linux-4.4.14/arch/powerpc/kvm/ |
D | booke_emulate.c |
35   static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfi() argument
37   vcpu->arch.pc = vcpu->arch.shared->srr0; in kvmppc_emul_rfi()
38   kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); in kvmppc_emul_rfi()
41   static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfdi() argument
43   vcpu->arch.pc = vcpu->arch.dsrr0; in kvmppc_emul_rfdi()
44   kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); in kvmppc_emul_rfdi()
47   static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) in kvmppc_emul_rfci() argument
49   vcpu->arch.pc = vcpu->arch.csrr0; in kvmppc_emul_rfci()
50   kvmppc_set_msr(vcpu, vcpu->arch.csrr1); in kvmppc_emul_rfci()
53   int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_booke_emulate_op() argument
[all …]
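
kvmppc_emul_rfi(), _rfdi() and _rfci() are three instances of one pattern: a return-from-interrupt restores the PC and MSR from the matching save/restore register pair (SRR0/SRR1, DSRR0/DSRR1 or CSRR0/CSRR1). Factored out as a sketch:

    #include <stdint.h>

    struct toy_ppc_vcpu { uint64_t pc, msr; };

    /* Generic return-from-interrupt: save0 holds the interrupted PC,
     * save1 the interrupted machine state. The kernel routes the MSR
     * write through kvmppc_set_msr() so side effects are applied. */
    static void toy_emul_return(struct toy_ppc_vcpu *vcpu,
                                uint64_t save0, uint64_t save1)
    {
        vcpu->pc  = save0;
        vcpu->msr = save1;
    }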
|
D | booke.c |
75   void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) in kvmppc_dump_vcpu() argument
79   printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); in kvmppc_dump_vcpu()
80   printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); in kvmppc_dump_vcpu()
81   printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, in kvmppc_dump_vcpu()
82   vcpu->arch.shared->srr1); in kvmppc_dump_vcpu()
84   printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvmppc_dump_vcpu()
88   kvmppc_get_gpr(vcpu, i), in kvmppc_dump_vcpu()
89   kvmppc_get_gpr(vcpu, i+1), in kvmppc_dump_vcpu()
90   kvmppc_get_gpr(vcpu, i+2), in kvmppc_dump_vcpu()
91   kvmppc_get_gpr(vcpu, i+3)); in kvmppc_dump_vcpu()
[all …]
|
D | book3s_pr.c |
54   static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56   static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
65   static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
67   ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
71   static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
73   ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
74   ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
81   if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
88   vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
89   kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
[all …]
|
D | emulate_loadstore.c |
50   int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_loadstore() argument
52   struct kvm_run *run = vcpu->run; in kvmppc_emulate_loadstore()
59   kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); in kvmppc_emulate_loadstore()
61   emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); in kvmppc_emulate_loadstore()
73   emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); in kvmppc_emulate_loadstore()
77   emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); in kvmppc_emulate_loadstore()
81   emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); in kvmppc_emulate_loadstore()
82   kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); in kvmppc_emulate_loadstore()
86   emulated = kvmppc_handle_store(run, vcpu, in kvmppc_emulate_loadstore()
87   kvmppc_get_gpr(vcpu, rs), in kvmppc_emulate_loadstore()
[all …]
|
D | book3s.c |
69   void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_unfixup_split_real() argument
71   if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { in kvmppc_unfixup_split_real()
72   ulong pc = kvmppc_get_pc(vcpu); in kvmppc_unfixup_split_real()
74   kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
75   vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_unfixup_split_real()
80   static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) in kvmppc_interrupt_offset() argument
82   if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_interrupt_offset()
83   return to_book3s(vcpu)->hior; in kvmppc_interrupt_offset()
87   static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, in kvmppc_update_int_pending() argument
90   if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_update_int_pending()
[all …]
|
D | book3s_emulate.c |
77   static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) in spr_allowed() argument
80   if (vcpu->arch.papr_enabled && (level > PRIV_SUPER)) in spr_allowed()
84   if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) in spr_allowed()
90   int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_core_emulate_op_pr() argument
103  if ((kvmppc_get_msr(vcpu) & MSR_LE) && in kvmppc_core_emulate_op_pr()
112  kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED); in kvmppc_core_emulate_op_pr()
113  kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); in kvmppc_core_emulate_op_pr()
121  kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); in kvmppc_core_emulate_op_pr()
122  kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); in kvmppc_core_emulate_op_pr()
134  kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); in kvmppc_core_emulate_op_pr()
[all …]
|
D | book3s_paired_singles.c |
161  static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) in kvmppc_sync_qpr() argument
163  kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); in kvmppc_sync_qpr()
166  static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) in kvmppc_inject_pf() argument
169  u64 msr = kvmppc_get_msr(vcpu); in kvmppc_inject_pf()
173  kvmppc_set_msr(vcpu, msr); in kvmppc_inject_pf()
174  kvmppc_set_dar(vcpu, eaddr); in kvmppc_inject_pf()
179  kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_inject_pf()
180  kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); in kvmppc_inject_pf()
183  static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_emulate_fpr_load() argument
195  r = kvmppc_ld(vcpu, &addr, len, tmp, true); in kvmppc_emulate_fpr_load()
[all …]
|
D | emulate.c |
37   void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) in kvmppc_emulate_dec() argument
42   pr_debug("mtDEC: %x\n", vcpu->arch.dec); in kvmppc_emulate_dec()
43   hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_emulate_dec()
47   kvmppc_core_dequeue_dec(vcpu); in kvmppc_emulate_dec()
50   if (vcpu->arch.dec & 0x80000000) { in kvmppc_emulate_dec()
51   kvmppc_core_queue_dec(vcpu); in kvmppc_emulate_dec()
58   if (vcpu->arch.dec == 0) in kvmppc_emulate_dec()
68   dec_time = vcpu->arch.dec; in kvmppc_emulate_dec()
76   hrtimer_start(&vcpu->arch.dec_timer, in kvmppc_emulate_dec()
78   vcpu->arch.dec_jiffies = get_tb(); in kvmppc_emulate_dec()
[all …]
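
The 0x80000000 test in kvmppc_emulate_dec() reflects that the decrementer is a signed 32-bit down-counter: a value with the sign bit set has already passed zero, so the interrupt is queued immediately instead of arming the hrtimer. The check in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    static bool toy_dec_expired(uint32_t dec)
    {
        return (dec & 0x80000000u) != 0;   /* negative as signed: already expired */
    }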
|
D | book3s_pr_papr.c |
26   static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) in get_pteg_addr() argument
28   struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in get_pteg_addr()
39   static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) in kvmppc_h_pr_enter() argument
41   long flags = kvmppc_get_gpr(vcpu, 4); in kvmppc_h_pr_enter()
42   long pte_index = kvmppc_get_gpr(vcpu, 5); in kvmppc_h_pr_enter()
50   pteg_addr = get_pteg_addr(vcpu, pte_index); in kvmppc_h_pr_enter()
52   mutex_lock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_h_pr_enter()
71   hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); in kvmppc_h_pr_enter()
72   hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); in kvmppc_h_pr_enter()
75   kvmppc_set_gpr(vcpu, 4, pte_index | i); in kvmppc_h_pr_enter()
[all …]
|
D | e500mc.c |
31   void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) in kvmppc_set_pending_interrupt() argument
52   tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id; in kvmppc_set_pending_interrupt()
77   mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); in kvmppc_e500_tlbil_one()
98   mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); in kvmppc_e500_tlbil_all()
104  void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) in kvmppc_set_pid() argument
106  vcpu->arch.pid = pid; in kvmppc_set_pid()
109  void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) in kvmppc_mmu_msr_notify() argument
116  static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_e500mc() argument
118  struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_core_vcpu_load_e500mc()
120  kvmppc_booke_vcpu_load(vcpu, cpu); in kvmppc_core_vcpu_load_e500mc()
[all …]
|
D | e500_emulate.c |
54   static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) in kvmppc_e500_emul_msgclr() argument
56   ulong param = vcpu->arch.gpr[rb]; in kvmppc_e500_emul_msgclr()
62   clear_bit(prio, &vcpu->arch.pending_exceptions); in kvmppc_e500_emul_msgclr()
66   static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) in kvmppc_e500_emul_msgsnd() argument
68   ulong param = vcpu->arch.gpr[rb]; in kvmppc_e500_emul_msgsnd()
77   kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) { in kvmppc_e500_emul_msgsnd()
89   static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_e500_emul_ehpriv() argument
97   run->debug.arch.address = vcpu->arch.pc; in kvmppc_e500_emul_ehpriv()
99   kvmppc_account_exit(vcpu, DEBUG_EXITS); in kvmppc_e500_emul_ehpriv()
109  static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu) in kvmppc_e500_emul_dcbtls() argument
[all …]
|
D | book3s_hv.c |
84   static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
85   static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
114  static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) in kvmppc_fast_vcpu_kick_hv() argument
119  wqp = kvm_arch_vcpu_wq(vcpu); in kvmppc_fast_vcpu_kick_hv()
122  ++vcpu->stat.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
125  if (kvmppc_ipi_thread(vcpu->arch.thread_cpu)) in kvmppc_fast_vcpu_kick_hv()
129  cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
188  static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_hv() argument
190  struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
199  if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
[all …]
|
D | timing.c |
33   void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) in kvmppc_init_timing_stats() argument
38   mutex_lock(&vcpu->arch.exit_timing_lock); in kvmppc_init_timing_stats()
40   vcpu->arch.last_exit_type = 0xDEAD; in kvmppc_init_timing_stats()
42   vcpu->arch.timing_count_type[i] = 0; in kvmppc_init_timing_stats()
43   vcpu->arch.timing_max_duration[i] = 0; in kvmppc_init_timing_stats()
44   vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; in kvmppc_init_timing_stats()
45   vcpu->arch.timing_sum_duration[i] = 0; in kvmppc_init_timing_stats()
46   vcpu->arch.timing_sum_quad_duration[i] = 0; in kvmppc_init_timing_stats()
48   vcpu->arch.timing_last_exit = 0; in kvmppc_init_timing_stats()
49   vcpu->arch.timing_exit.tv64 = 0; in kvmppc_init_timing_stats()
[all …]
|
D | timing.h |
27   void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
28   void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
29   void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
30   void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
32   static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) in kvmppc_set_exit_type() argument
34   vcpu->arch.last_exit_type = type; in kvmppc_set_exit_type()
39   static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_init_timing_stats() argument
40   static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_update_timing_stats() argument
41   static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, in kvmppc_create_vcpu_debugfs() argument
43   static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {} in kvmppc_remove_vcpu_debugfs() argument
[all …]
|
D | powerpc.c |
55   int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
69   int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
85   kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
86   vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
91   vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
101  if (vcpu->requests) { in kvmppc_prepare_to_enter()
104  trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
105  r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
112  if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
129  static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
[all …]
|
D | e500_mmu.c |
67   static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) in get_tlb_esel() argument
69   struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in get_tlb_esel()
70   int esel = get_tlb_esel_bit(vcpu); in get_tlb_esel()
74   esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); in get_tlb_esel()
129  static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, in kvmppc_e500_deliver_tlb_miss() argument
132  struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_deliver_tlb_miss()
137  tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; in kvmppc_e500_deliver_tlb_miss()
139  tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; in kvmppc_e500_deliver_tlb_miss()
141  vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) in kvmppc_e500_deliver_tlb_miss()
143  vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) in kvmppc_e500_deliver_tlb_miss()
[all …]
|
D | e500.h |
61   struct kvm_vcpu vcpu; member
103  static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) in to_e500() argument
105  return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); in to_e500()
130  int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
131  int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
132  int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
133  int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
134  int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
138  void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
139  int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
[all …]
|
D | book3s_64_mmu.c |
39   static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_reset_msr() argument
41   kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); in kvmppc_mmu_book3s_64_reset_msr()
45   struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_64_find_slbe() argument
52   for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
55   if (!vcpu->arch.slb[i].valid) in kvmppc_mmu_book3s_64_find_slbe()
58   if (vcpu->arch.slb[i].tb) in kvmppc_mmu_book3s_64_find_slbe()
61   if (vcpu->arch.slb[i].esid == cmp_esid) in kvmppc_mmu_book3s_64_find_slbe()
62   return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_64_find_slbe()
67   for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
68   if (vcpu->arch.slb[i].vsid) in kvmppc_mmu_book3s_64_find_slbe()
[all …]
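
kvmppc_mmu_book3s_64_find_slbe() compares the faulting address against either the 256MB or the 1TB ESID of each SLB entry, selected by the entry's tb (1TB segment) bit. The per-entry match test modeled standalone; field names follow the excerpt, and the ESID extraction is assumed done by the caller:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_slbe { bool valid, tb; uint64_t esid; };

    static bool toy_slbe_matches(const struct toy_slbe *e,
                                 uint64_t esid_256m, uint64_t esid_1t)
    {
        uint64_t cmp = e->tb ? esid_1t : esid_256m;   /* pick segment size */
        return e->valid && e->esid == cmp;
    }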
|
D | booke.h |
76   void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
77   void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
79   void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
80   void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
81   void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
82   void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
84   int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
86   int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
87   int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
90   void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
[all …]
|
D | book3s_32_mmu.c |
52   static inline bool check_debug_ip(struct kvm_vcpu *vcpu) in check_debug_ip() argument
55   return vcpu->arch.pc == DEBUG_MMU_PTE_IP; in check_debug_ip()
81   static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
84   static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
87   static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) in find_sr() argument
89   return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); in find_sr()
92   static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_32_ea_to_vp() argument
98   if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false)) in kvmppc_mmu_book3s_32_ea_to_vp()
101  kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_book3s_32_ea_to_vp()
105  static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_32_reset_msr() argument
[all …]
|
D | book3s_64_mmu_host.c |
35   void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_invalidate_pte() argument
44   static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
57   static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
62   if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
65   sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
66   map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
72   map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
82   int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
98   struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_map_page()
108  pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
[all …]
|
D | book3s_mmu_hpte.c |
67   void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_hpte_cache_map() argument
70   struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_hpte_cache_map()
112  static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in invalidate_pte() argument
114  struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in invalidate_pte()
119  kvmppc_mmu_invalidate_pte(vcpu, pte); in invalidate_pte()
143  static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) in kvmppc_mmu_pte_flush_all() argument
145  struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_pte_flush_all()
155  invalidate_pte(vcpu, pte); in kvmppc_mmu_pte_flush_all()
161  static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) in kvmppc_mmu_pte_flush_page() argument
163  struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_pte_flush_page()
[all …]
|
D | e500.c |
143  vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, in kvmppc_e500_recalc_shadow_pid()
144  get_cur_as(&vcpu_e500->vcpu), in kvmppc_e500_recalc_shadow_pid()
145  get_cur_pid(&vcpu_e500->vcpu), in kvmppc_e500_recalc_shadow_pid()
146  get_cur_pr(&vcpu_e500->vcpu), 1); in kvmppc_e500_recalc_shadow_pid()
147  vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, in kvmppc_e500_recalc_shadow_pid()
148  get_cur_as(&vcpu_e500->vcpu), 0, in kvmppc_e500_recalc_shadow_pid()
149  get_cur_pr(&vcpu_e500->vcpu), 1); in kvmppc_e500_recalc_shadow_pid()
218  unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, in kvmppc_e500_get_tlb_stid() argument
221  return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), in kvmppc_e500_get_tlb_stid()
222  get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); in kvmppc_e500_get_tlb_stid()
[all …]
|
D | book3s_32_mmu_host.c |
61   void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_invalidate_pte() argument
78   static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
91   static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
96   if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
99   sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
100  map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
107  map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
118  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, in kvmppc_mmu_get_pteg() argument
142  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
160  hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
[all …]
|
D | trace_hv.h |
223  TP_PROTO(struct kvm_vcpu *vcpu),
224  TP_ARGS(vcpu),
234  __entry->vcpu_id = vcpu->vcpu_id;
235  __entry->pc = kvmppc_get_pc(vcpu);
236  __entry->ceded = vcpu->arch.ceded;
237  __entry->pending_exceptions = vcpu->arch.pending_exceptions;
247  TP_PROTO(struct kvm_vcpu *vcpu),
248  TP_ARGS(vcpu),
259  __entry->vcpu_id = vcpu->vcpu_id;
260  __entry->trap = vcpu->arch.trap;
[all …]
|
D | book3s_xics.c |
306  kvmppc_book3s_queue_irqprio(icp->vcpu, in icp_try_update()
309  kvmppc_fast_vcpu_kick(icp->vcpu); in icp_try_update()
570  static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu) in kvmppc_h_xirr() argument
573  struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_h_xirr()
577  kvmppc_book3s_dequeue_irqprio(icp->vcpu, in kvmppc_h_xirr()
599  XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr); in kvmppc_h_xirr()
604  static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, in kvmppc_h_ipi() argument
608  struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_h_ipi()
615  vcpu->vcpu_id, server, mfrr); in kvmppc_h_ipi()
617  icp = vcpu->arch.icp; in kvmppc_h_ipi()
[all …]
|
D | book3s_interrupts.S |
41   #define VCPU_LOAD_NVGPRS(vcpu) \ argument
42   PPC_LL r14, VCPU_GPR(R14)(vcpu); \
43   PPC_LL r15, VCPU_GPR(R15)(vcpu); \
44   PPC_LL r16, VCPU_GPR(R16)(vcpu); \
45   PPC_LL r17, VCPU_GPR(R17)(vcpu); \
46   PPC_LL r18, VCPU_GPR(R18)(vcpu); \
47   PPC_LL r19, VCPU_GPR(R19)(vcpu); \
48   PPC_LL r20, VCPU_GPR(R20)(vcpu); \
49   PPC_LL r21, VCPU_GPR(R21)(vcpu); \
50   PPC_LL r22, VCPU_GPR(R22)(vcpu); \
[all …]
|
D | book3s_hv_rm_xics.c |
53   static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, in icp_rm_set_vcpu_irq() argument
60   vcpu->stat.queue_intr++; in icp_rm_set_vcpu_irq()
61   set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); in icp_rm_set_vcpu_irq()
64   if (vcpu == this_vcpu) { in icp_rm_set_vcpu_irq()
70   cpu = vcpu->arch.thread_cpu; in icp_rm_set_vcpu_irq()
73   this_icp->rm_kick_target = vcpu; in icp_rm_set_vcpu_irq()
81   static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) in icp_rm_clr_vcpu_irq() argument
85   &vcpu->arch.pending_exceptions); in icp_rm_clr_vcpu_irq()
120  icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
124  this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu; in icp_rm_try_update()
[all …]
|
D | book3s_hv_ras.c |
34   static void reload_slb(struct kvm_vcpu *vcpu) in reload_slb() argument
43   slb = vcpu->arch.slb_shadow.pinned_addr; in reload_slb()
49   if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) in reload_slb()
68   static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) in kvmppc_realmode_mc_power7() argument
70   unsigned long srr1 = vcpu->arch.shregs.msr; in kvmppc_realmode_mc_power7()
76   unsigned long dsisr = vcpu->arch.shregs.dsisr; in kvmppc_realmode_mc_power7()
81   reload_slb(vcpu); in kvmppc_realmode_mc_power7()
101  reload_slb(vcpu); in kvmppc_realmode_mc_power7()
139  long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) in kvmppc_realmode_machine_check() argument
141  return kvmppc_realmode_mc_power7(vcpu); in kvmppc_realmode_machine_check()
|
D | book3s_rtas.c |
21   static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_set_xive() argument
35   rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); in kvm_rtas_set_xive()
42   static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_get_xive() argument
55   rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); in kvm_rtas_get_xive()
67   static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_int_off() argument
79   rc = kvmppc_xics_int_off(vcpu->kvm, irq); in kvm_rtas_int_off()
86   static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_int_on() argument
98   rc = kvmppc_xics_int_on(vcpu->kvm, irq); in kvm_rtas_int_on()
107  void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
208  int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) in kvmppc_rtas_hcall() argument
[all …]
|
D | trace_booke.h |
40   TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
41   TP_ARGS(exit_nr, vcpu),
53   __entry->pc = kvmppc_get_pc(vcpu);
54   __entry->dar = kvmppc_get_fault_dar(vcpu);
55   __entry->msr = vcpu->arch.shared->msr;
56   __entry->last_inst = vcpu->arch.last_inst;
196  TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
197  TP_ARGS(vcpu, priority),
206  __entry->cpu_nr = vcpu->vcpu_id;
208  __entry->pending = vcpu->arch.pending_exceptions;
|
D | trace_pr.h |
14   TP_PROTO(int r, struct kvm_vcpu *vcpu),
15   TP_ARGS(r, vcpu),
24   __entry->pc = kvmppc_get_pc(vcpu);
123  TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
125  TP_ARGS(type, vcpu, p1, p2),
135  __entry->count = to_book3s(vcpu)->hpte_cache_count;
219  TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
220  TP_ARGS(exit_nr, vcpu),
233  __entry->pc = kvmppc_get_pc(vcpu);
234  __entry->dar = kvmppc_get_fault_dar(vcpu);
[all …]
|
D | e500_mmu_host.c |
133  __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
138  vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
151  stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); in write_stlbe()
160  void kvmppc_map_magic(struct kvm_vcpu *vcpu) in kvmppc_map_magic() argument
162  struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_map_magic()
164  ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; in kvmppc_map_magic()
176  magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; in kvmppc_map_magic()
297  void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) in kvmppc_core_flush_tlb() argument
299  struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_core_flush_tlb()
307  struct kvm_vcpu *vcpu, in kvmppc_e500_setup_stlbe() argument
[all …]
|
D | book3s_xics.h |
63   struct kvm_vcpu *vcpu; member
116  struct kvm_vcpu *vcpu = NULL; in kvmppc_xics_find_server() local
119  kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_xics_find_server()
120  if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num) in kvmppc_xics_find_server()
121  return vcpu->arch.icp; in kvmppc_xics_find_server()
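
kvmppc_xics_find_server() is a linear scan over the VM's vcpus for the ICP whose interrupt-server number matches. The same shape over a plain array (the kernel iterates with kvm_for_each_vcpu; struct names here are illustrative):

    #include <stddef.h>

    struct toy_icp   { int server_num; };
    struct toy_xvcpu { struct toy_icp *icp; };

    static struct toy_icp *toy_find_server(struct toy_xvcpu *vcpus,
                                           size_t n, int nr)
    {
        for (size_t i = 0; i < n; i++)
            if (vcpus[i].icp && vcpus[i].icp->server_num == nr)
                return vcpus[i].icp;
        return NULL;   /* no vcpu owns that server number */
    }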
|
D | book3s_64_vio_hv.c |
44   long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, in kvmppc_h_put_tce() argument
47   struct kvm *kvm = vcpu->kvm; in kvmppc_h_put_tce()
79   long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, in kvmppc_h_get_tce() argument
82   struct kvm *kvm = vcpu->kvm; in kvmppc_h_get_tce()
97   vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; in kvmppc_h_get_tce()
|
D | book3s_hv_rm_mmu.c |
376  long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_enter() argument
379  return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, in kvmppc_h_enter()
380  vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); in kvmppc_h_enter()
483  long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_remove() argument
486  return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, in kvmppc_h_remove()
487  &vcpu->arch.gpr[4]); in kvmppc_h_remove()
490  long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) in kvmppc_h_bulk_remove() argument
492  struct kvm *kvm = vcpu->kvm; in kvmppc_h_bulk_remove()
493  unsigned long *args = &vcpu->arch.gpr[4]; in kvmppc_h_bulk_remove()
600  long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_protect() argument
[all …]
|
D | book3s_64_mmu_hv.c |
175  void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
186  struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma()
246  static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_hv_reset_msr() argument
248  unsigned long msr = vcpu->arch.intr_msr; in kvmppc_mmu_book3s_64_hv_reset_msr()
251  if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) in kvmppc_mmu_book3s_64_hv_reset_msr()
254  msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; in kvmppc_mmu_book3s_64_hv_reset_msr()
255  kvmppc_set_msr(vcpu, msr); in kvmppc_mmu_book3s_64_hv_reset_msr()
278  static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_hv_find_slbe() argument
284  for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
285  if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
[all …]
|
D | book3s.h |
25   extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
26   extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
28   extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
30   extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
|
D | mpic.c |
117  struct kvm_vcpu *vcpu = current->thread.kvm_vcpu; in get_current_cpu() local
118  return vcpu ? vcpu->arch.irq_cpu_id : -1; in get_current_cpu()
179  struct kvm_vcpu *vcpu; member
249  if (!dst->vcpu) { in mpic_irq_raise()
255  pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_raise()
261  kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq); in mpic_irq_raise()
267  if (!dst->vcpu) { in mpic_irq_lower()
273  pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_lower()
279  kvmppc_core_dequeue_external(dst->vcpu); in mpic_irq_lower()
1177 void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu) in kvmppc_mpic_set_epr() argument
[all …]
|
D | trace.h |
102  TP_PROTO(struct kvm_vcpu *vcpu),
103  TP_ARGS(vcpu),
111  __entry->cpu_nr = vcpu->vcpu_id;
112  __entry->requests = vcpu->requests;
|
D | book3s_hv_builtin.c |
110  long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, in kvmppc_rm_h_confer() argument
184  long kvmppc_h_random(struct kvm_vcpu *vcpu) in kvmppc_h_random() argument
186  if (powernv_get_random_real_mode(&vcpu->arch.gpr[4])) in kvmppc_h_random()
|
D | Kconfig |
117  Calculate time taken for each vcpu in the real-mode guest entry,
122  kvm/vm#/vcpu#/timings. The overhead is of the order of 30 - 40
134  Calculate elapsed time for every exit/enter cycle. A per-vcpu
|
/linux-4.4.14/arch/arm/include/asm/ |
D | kvm_emulate.h |
28   unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
29   unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
31   static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, in vcpu_get_reg() argument
34   return *vcpu_reg(vcpu, reg_num); in vcpu_get_reg()
37   static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, in vcpu_set_reg() argument
40   *vcpu_reg(vcpu, reg_num) = val; in vcpu_set_reg()
43   bool kvm_condition_valid(struct kvm_vcpu *vcpu);
44   void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
45   void kvm_inject_undefined(struct kvm_vcpu *vcpu);
46   void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
[all …]
|
D | kvm_coproc.h |
22   void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
31   int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
32   int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
33   int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
34   int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
35   int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
36   int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
38   unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
39   int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
43   int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
[all …]
|
D | kvm_host.h |
44   u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
46   int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
47   void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
156  unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
157  int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
158  int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
159  int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
169  unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
170  int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
183  int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
[all …]
|
D | kvm_mmu.h |
62   int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
64   void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
180  static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) in vcpu_has_cache_enabled() argument
182  return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; in vcpu_has_cache_enabled()
185  static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in __coherent_cache_guest_page() argument
207  bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; in __coherent_cache_guest_page()
269  void kvm_set_way_flush(struct kvm_vcpu *vcpu);
270  void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
|
D | kvm_psci.h | 24 int kvm_psci_version(struct kvm_vcpu *vcpu); 25 int kvm_psci_call(struct kvm_vcpu *vcpu);
|
D | kvm_mmio.h | 31 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); 32 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
D | kvm_asm.h | 101 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
/linux-4.4.14/arch/arm64/kvm/ |
D | debug.c |
47   static void save_guest_debug_regs(struct kvm_vcpu *vcpu) in save_guest_debug_regs() argument
49   vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1); in save_guest_debug_regs()
52   vcpu->arch.guest_debug_preserved.mdscr_el1); in save_guest_debug_regs()
55   static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) in restore_guest_debug_regs() argument
57   vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1; in restore_guest_debug_regs()
60   vcpu_sys_reg(vcpu, MDSCR_EL1)); in restore_guest_debug_regs()
82   void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) in kvm_arm_reset_debug_ptr() argument
84   vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state; in kvm_arm_reset_debug_ptr()
107  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) in kvm_arm_setup_debug() argument
109  bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY); in kvm_arm_setup_debug()
[all …]
|
D | inject_fault.c |
32   static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) in prepare_fault32() argument
35   unsigned long new_spsr_value = *vcpu_cpsr(vcpu); in prepare_fault32()
38   u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); in prepare_fault32()
47   *vcpu_cpsr(vcpu) = cpsr; in prepare_fault32()
50   *vcpu_spsr(vcpu) = new_spsr_value; in prepare_fault32()
51   *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; in prepare_fault32()
57   vect_offset += vcpu_cp15(vcpu, c12_VBAR); in prepare_fault32()
59   *vcpu_pc(vcpu) = vect_offset; in prepare_fault32()
62   static void inject_undef32(struct kvm_vcpu *vcpu) in inject_undef32() argument
64   prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); in inject_undef32()
[all …]
|
D | handle_exit.c |
36   static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_hvc() argument
40   trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), in handle_hvc()
41   kvm_vcpu_hvc_get_imm(vcpu)); in handle_hvc()
43   ret = kvm_psci_call(vcpu); in handle_hvc()
45   kvm_inject_undefined(vcpu); in handle_hvc()
52   static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_smc() argument
54   kvm_inject_undefined(vcpu); in handle_smc()
70   static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_wfx() argument
72   if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) { in kvm_handle_wfx()
73   trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true); in kvm_handle_wfx()
[all …]
|
D | guest.c |
41   int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
51   static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in get_core_reg() argument
60   struct kvm_regs *regs = vcpu_gp_regs(vcpu); in get_core_reg()
76   static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_core_reg() argument
79   struct kvm_regs *regs = vcpu_gp_regs(vcpu); in set_core_reg()
124  int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
129  int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
156  static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) in copy_timer_indices() argument
170  static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_timer_reg() argument
180  return kvm_arm_timer_set_reg(vcpu, reg->id, val); in set_timer_reg()
[all …]
|
D | emulate.c |
55   static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) in kvm_vcpu_get_condition() argument
57   u32 esr = kvm_vcpu_get_hsr(vcpu); in kvm_vcpu_get_condition()
68   bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) in kvm_condition_valid32() argument
75   if (kvm_vcpu_get_hsr(vcpu) >> 30) in kvm_condition_valid32()
79   cond = kvm_vcpu_get_condition(vcpu); in kvm_condition_valid32()
83   cpsr = *vcpu_cpsr(vcpu); in kvm_condition_valid32()
117  static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) in kvm_adjust_itstate() argument
120  unsigned long cpsr = *vcpu_cpsr(vcpu); in kvm_adjust_itstate()
142  *vcpu_cpsr(vcpu) = cpsr; in kvm_adjust_itstate()
149  void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) in kvm_skip_instr32()
[all …]
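
kvm_adjust_itstate() advances the Thumb IT-block state after an instruction is skipped. The architectural rule (ARMv7 ITAdvance): if the low three ITSTATE bits are zero, the block is over and the whole field clears; otherwise the low five bits shift left by one while the base condition in the top three bits is kept. A hedged standalone version operating on the packed 8-bit ITSTATE value (KVM stores these bits split across CPSR fields):

    #include <stdint.h>

    static uint8_t toy_advance_itstate(uint8_t it)
    {
        if ((it & 0x07) == 0)
            return 0;                                /* IT block finished */
        return (it & 0xe0) | ((it << 1) & 0x1f);     /* keep cond, shift mask */
    }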
|
D | sys_regs.c |
80   static bool access_dcsw(struct kvm_vcpu *vcpu, in access_dcsw() argument
85   return read_from_write_only(vcpu, p); in access_dcsw()
87   kvm_set_way_flush(vcpu); in access_dcsw()
96   static bool access_vm_reg(struct kvm_vcpu *vcpu, in access_vm_reg() argument
100  bool was_enabled = vcpu_has_cache_enabled(vcpu); in access_vm_reg()
105  vcpu_sys_reg(vcpu, r->reg) = p->regval; in access_vm_reg()
108  vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); in access_vm_reg()
109  vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); in access_vm_reg()
112  kvm_toggle_cache(vcpu, was_enabled); in access_vm_reg()
122  static bool access_gic_sgi(struct kvm_vcpu *vcpu, in access_gic_sgi() argument
[all …]
|
D | sys_regs.h |
60   int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
62   int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
73   static inline bool ignore_write(struct kvm_vcpu *vcpu, in ignore_write() argument
79   static inline bool read_zero(struct kvm_vcpu *vcpu, in read_zero() argument
86   static inline bool write_to_read_only(struct kvm_vcpu *vcpu, in write_to_read_only() argument
90   *vcpu_pc(vcpu)); in write_to_read_only()
95   static inline bool read_from_write_only(struct kvm_vcpu *vcpu, in read_from_write_only() argument
99   *vcpu_pc(vcpu)); in read_from_write_only()
105  static inline void reset_unknown(struct kvm_vcpu *vcpu, in reset_unknown() argument
110  vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL; in reset_unknown()
[all …]
|
D | trace.h |
48   TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
49   TP_ARGS(vcpu, guest_debug),
52   __field(struct kvm_vcpu *, vcpu)
57   __entry->vcpu = vcpu;
61   TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
153  TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
154  TP_ARGS(vcpu, guest_debug),
157  __field(struct kvm_vcpu *, vcpu)
162  __entry->vcpu = vcpu;
166  TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
|
D | reset.c |
98   int kvm_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_reset_vcpu() argument
103  switch (vcpu->arch.target) { in kvm_reset_vcpu()
105  if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { in kvm_reset_vcpu()
118  memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset)); in kvm_reset_vcpu()
121  kvm_reset_sys_regs(vcpu); in kvm_reset_vcpu()
124  return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); in kvm_reset_vcpu()
|
D | sys_regs_generic_v8.c |
33   static bool access_actlr(struct kvm_vcpu *vcpu, in access_actlr() argument
38   return ignore_write(vcpu, p); in access_actlr()
40   p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1); in access_actlr()
44   static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) in reset_actlr() argument
49   vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr; in reset_actlr()
|
D | regmap.c |
112  unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) in vcpu_reg32() argument
114  unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; in vcpu_reg32()
115  unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; in vcpu_reg32()
144  unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu) in vcpu_spsr32() argument
146  unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; in vcpu_spsr32()
167  return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode]; in vcpu_spsr32()
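
vcpu_reg32() resolves an AArch32 register number to a storage slot through a per-mode lookup, because r8-r14 are banked across processor modes: the same architectural register name maps to different physical slots in, say, IRQ mode versus user mode. A toy version of the table-driven indirection; the offsets and the two-mode table below are invented for illustration:

    #include <stdint.h>

    /* two modes only, flat file of 18 slots; real KVM covers all modes */
    static const uint8_t toy_reg_offsets[2][16] = {
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },  /* usr */
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16, 17, 15 },  /* irq: banked sp/lr */
    };

    static uint32_t *toy_vcpu_reg(uint32_t file[18], int mode, int reg)
    {
        return &file[toy_reg_offsets[mode][reg]];
    }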
|
/linux-4.4.14/arch/x86/kvm/ |
D | x86.h |
9    static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) in kvm_clear_exception_queue() argument
11   vcpu->arch.exception.pending = false; in kvm_clear_exception_queue()
14   static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, in kvm_queue_interrupt() argument
17   vcpu->arch.interrupt.pending = true; in kvm_queue_interrupt()
18   vcpu->arch.interrupt.soft = soft; in kvm_queue_interrupt()
19   vcpu->arch.interrupt.nr = vector; in kvm_queue_interrupt()
22   static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu) in kvm_clear_interrupt_queue() argument
24   vcpu->arch.interrupt.pending = false; in kvm_clear_interrupt_queue()
27   static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) in kvm_event_needs_reinjection() argument
29   return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || in kvm_event_needs_reinjection()
[all …]
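
The x86.h helpers above manage a single-slot pending-event record on the vcpu: queueing an interrupt stores the vector and whether it was a soft (INTn-style) injection; clearing just drops the pending flag. Self-contained model:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_event_queue { bool pending, soft; uint8_t nr; };

    static void toy_queue_interrupt(struct toy_event_queue *q,
                                    uint8_t vector, bool soft)
    {
        q->pending = true;
        q->soft    = soft;     /* soft = software-initiated (INTn) */
        q->nr      = vector;
    }

    static void toy_clear_interrupt_queue(struct toy_event_queue *q)
    {
        q->pending = false;
    }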
|
D | x86.c |
92   static void update_cr8_intercept(struct kvm_vcpu *vcpu);
93   static void process_nmi(struct kvm_vcpu *vcpu);
94   static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
189  static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
193  vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
279  u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
281  return vcpu->arch.apic_base; in kvm_get_apic_base()
285  int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
287  u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
291  u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | in kvm_set_apic_base()
[all …]
|
D | kvm_cache_regs.h |
9    static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, in kvm_register_read() argument
12   if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) in kvm_register_read()
13   kvm_x86_ops->cache_reg(vcpu, reg); in kvm_register_read()
15   return vcpu->arch.regs[reg]; in kvm_register_read()
18   static inline void kvm_register_write(struct kvm_vcpu *vcpu, in kvm_register_write() argument
22   vcpu->arch.regs[reg] = val; in kvm_register_write()
23   __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); in kvm_register_write()
24   __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in kvm_register_write()
27   static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) in kvm_rip_read() argument
29   return kvm_register_read(vcpu, VCPU_REGS_RIP); in kvm_rip_read()
[all …]
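
kvm_register_read()/kvm_register_write() implement a lazy register cache: a read demand-loads the value from hardware state only if the register's "available" bit is clear, and a write marks the slot both available and dirty so it gets flushed back later. A self-contained model, with a fetch callback standing in for kvm_x86_ops->cache_reg():

    #include <stdint.h>

    struct toy_regcache {
        uint64_t regs[16];
        uint32_t avail, dirty;                          /* one bit per register */
        void (*fetch)(struct toy_regcache *c, int reg); /* fill regs[reg] from hw */
    };

    static uint64_t toy_register_read(struct toy_regcache *c, int reg)
    {
        if (!(c->avail & (1u << reg)))
            c->fetch(c, reg);            /* demand-load; fetch should set avail */
        return c->regs[reg];
    }

    static void toy_register_write(struct toy_regcache *c, int reg, uint64_t val)
    {
        c->regs[reg] = val;
        c->avail |= 1u << reg;
        c->dirty |= 1u << reg;           /* needs write-back to hardware state */
    }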
|
D | lapic.h |
26   struct kvm_vcpu *vcpu; member
45   int kvm_create_lapic(struct kvm_vcpu *vcpu);
46   void kvm_free_lapic(struct kvm_vcpu *vcpu);
48   int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
49   int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
50   int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
51   void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
52   void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
53   u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
54   void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
[all …]
|
D | hyperv.c |
52   static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu, in kvm_hv_msr_get_crash_data() argument
55   struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_data()
64   static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata) in kvm_hv_msr_get_crash_ctl() argument
66   struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_ctl()
72   static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host) in kvm_hv_msr_set_crash_ctl() argument
74   struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_ctl()
81   vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", in kvm_hv_msr_set_crash_ctl()
89   kvm_make_request(KVM_REQ_HV_CRASH, vcpu); in kvm_hv_msr_set_crash_ctl()
95   static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu, in kvm_hv_msr_set_crash_data() argument
98   struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_data()
[all …]
|
D | cpuid.h |
6    int kvm_update_cpuid(struct kvm_vcpu *vcpu);
7    struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
12   int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
15   int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18   int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21   void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
23   int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
25   static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) in cpuid_maxphyaddr() argument
27   return vcpu->arch.maxphyaddr; in cpuid_maxphyaddr()
30   static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) in guest_cpuid_has_xsave() argument
[all …]
|
D | vmx.c | 521 struct kvm_vcpu vcpu; member 611 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() argument 613 return container_of(vcpu, struct vcpu_vmx, vcpu); in to_vmx() 616 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) in vcpu_to_pi_desc() argument 618 return &(to_vmx(vcpu)->pi_desc); in vcpu_to_pi_desc() 837 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) in get_vmcs12() argument 839 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12() 842 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) in nested_get_page() argument 844 struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT); in nested_get_page() 861 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu); [all …]
|
D | lapic.c | 88 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) in kvm_apic_pending_eoi() argument 90 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi() 161 struct kvm_vcpu *vcpu; in recalculate_apic_map() local 171 kvm_for_each_vcpu(i, vcpu, kvm) { in recalculate_apic_map() 172 struct kvm_lapic *apic = vcpu->arch.apic; in recalculate_apic_map() 176 if (!kvm_apic_present(vcpu)) in recalculate_apic_map() 225 recalculate_apic_map(apic->vcpu->kvm); in apic_set_spiv() 234 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_id() 240 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_ldr() 249 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_x2apic_id() [all …]
|
D | mmu.c | 179 static void mmu_free_roots(struct kvm_vcpu *vcpu); 226 static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu) in kvm_current_mmio_generation() argument 228 return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK; in kvm_current_mmio_generation() 231 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument 234 unsigned int gen = kvm_current_mmio_generation(vcpu); in mark_mmio_spte() 261 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument 265 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte() 272 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument 276 kvm_gen = kvm_current_mmio_generation(vcpu); in check_mmio_spte() 299 static int is_nx(struct kvm_vcpu *vcpu) in is_nx() argument [all …]
|
D | svm.c | 131 struct kvm_vcpu vcpu; member 206 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); 207 static void svm_flush_tlb(struct kvm_vcpu *vcpu); 251 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) in to_svm() argument 253 return container_of(vcpu, struct vcpu_svm, vcpu); in to_svm() 263 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts() 278 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb() 380 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif() 385 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif() 390 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set() [all …]
|
D | paging_tmpl.h | 140 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() 163 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, in FNAME() 167 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME() 180 drop_spte(vcpu->kvm, spte); in FNAME() 184 static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) in FNAME() 199 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, in FNAME() 247 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME() 251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn); in FNAME() 261 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() 281 pte = mmu->get_cr3(vcpu); in FNAME() [all …]
|
D | pmu.c | 53 struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu); in kvm_pmi_trigger_fn() local 55 kvm_pmu_deliver_pmi(vcpu); in kvm_pmi_trigger_fn() 68 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow() 82 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow_intr() 95 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in kvm_perf_overflow_intr() 214 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) in kvm_pmu_handle_event() argument 216 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_pmu_handle_event() 235 int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) in kvm_pmu_is_valid_msr_idx() argument 237 return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx); in kvm_pmu_is_valid_msr_idx() 240 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) in kvm_pmu_rdpmc() argument [all …]
|
D | pmu.h | 4 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu) argument 6 #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu) 23 struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx); 24 int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx); 25 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr); 26 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data); 27 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info); 28 void (*refresh)(struct kvm_vcpu *vcpu); 29 void (*init)(struct kvm_vcpu *vcpu); 30 void (*reset)(struct kvm_vcpu *vcpu); [all …]
|
D | mmu_audit.c | 35 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level); 37 static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __mmu_spte_walk() argument 45 fn(vcpu, ent + i, level); in __mmu_spte_walk() 52 __mmu_spte_walk(vcpu, child, fn, level - 1); in __mmu_spte_walk() 57 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) in mmu_spte_walk() argument 62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_spte_walk() 65 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_spte_walk() 66 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_spte_walk() 69 __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL); in mmu_spte_walk() 74 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_spte_walk() [all …]
|
D | cpuid.c | 61 int kvm_update_cpuid(struct kvm_vcpu *vcpu) in kvm_update_cpuid() argument 64 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_update_cpuid() 66 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid() 73 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) in kvm_update_cpuid() 84 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid() 86 vcpu->arch.guest_supported_xcr0 = 0; in kvm_update_cpuid() 87 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_update_cpuid() 89 vcpu->arch.guest_supported_xcr0 = in kvm_update_cpuid() 92 vcpu->arch.guest_xstate_size = best->ebx = in kvm_update_cpuid() 93 xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid() [all …]
|
D | pmu_amd.c | 71 static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) in amd_is_valid_msr_idx() argument 73 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_is_valid_msr_idx() 81 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx) in amd_msr_idx_to_pmc() argument 83 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_msr_idx_to_pmc() 94 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in amd_is_valid_msr() argument 96 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_is_valid_msr() 105 static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in amd_pmu_get_msr() argument 107 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_get_msr() 126 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in amd_pmu_set_msr() argument 128 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in amd_pmu_set_msr() [all …]
|
D | mmu.h | 56 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context); 74 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct); 75 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 76 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); 87 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) in kvm_mmu_reload() argument 89 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) in kvm_mmu_reload() 92 return kvm_mmu_load(vcpu); in kvm_mmu_reload() 138 static inline bool is_write_protection(struct kvm_vcpu *vcpu) in is_write_protection() argument 140 return kvm_read_cr0_bits(vcpu, X86_CR0_WP); in is_write_protection() 147 static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument [all …]
|
D | mtrr.c | 61 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_mtrr_valid() argument 88 mask = (~0ULL) << cpuid_maxphyaddr(vcpu); in kvm_mtrr_valid() 98 kvm_inject_gp(vcpu, 0); in kvm_mtrr_valid() 121 static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) in mtrr_disabled_type() argument 133 if (guest_cpuid_has_mtrr(vcpu)) in mtrr_disabled_type() 316 static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr) in update_mtrr() argument 318 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; in update_mtrr() 323 !kvm_arch_has_noncoherent_dma(vcpu->kvm)) in update_mtrr() 342 kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end)); in update_mtrr() 350 static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_var_mtrr_msr() argument [all …]
|
D | pmu_intel.c | 117 static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) in intel_is_valid_msr_idx() argument 119 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr_idx() 128 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, in intel_msr_idx_to_pmc() argument 131 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc() 145 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in intel_is_valid_msr() argument 147 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr() 167 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in intel_pmu_get_msr() argument 169 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr() 199 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in intel_pmu_set_msr() argument 201 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr() [all …]
|
D | irq.h | 112 static inline int lapic_in_kernel(struct kvm_vcpu *vcpu) in lapic_in_kernel() argument 117 return vcpu->arch.apic != NULL; in lapic_in_kernel() 122 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); 123 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); 124 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); 125 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); 126 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); 127 void __kvm_migrate_timers(struct kvm_vcpu *vcpu); 129 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
|
D | ioapic.c | 108 static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) in __rtc_irq_eoi_tracking_restore_one() argument 111 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; in __rtc_irq_eoi_tracking_restore_one() 115 if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, in __rtc_irq_eoi_tracking_restore_one() 119 new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); in __rtc_irq_eoi_tracking_restore_one() 120 old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one() 126 __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one() 129 __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one() 135 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) in kvm_rtc_eoi_tracking_restore_one() argument 137 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; in kvm_rtc_eoi_tracking_restore_one() 140 __rtc_irq_eoi_tracking_restore_one(vcpu); in kvm_rtc_eoi_tracking_restore_one() [all …]
|
D | irq.c | 34 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument 36 return apic_has_pending_timer(vcpu); in kvm_cpu_has_pending_timer() 138 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) in kvm_inject_pending_timer_irqs() argument 140 kvm_inject_apic_timer_irqs(vcpu); in kvm_inject_pending_timer_irqs() 145 void __kvm_migrate_timers(struct kvm_vcpu *vcpu) in __kvm_migrate_timers() argument 147 __kvm_migrate_apic_timer(vcpu); in __kvm_migrate_timers() 148 __kvm_migrate_pit_timer(vcpu); in __kvm_migrate_timers()
|
D | irq_comm.c | 57 struct kvm_vcpu *vcpu, *lowest = NULL; in kvm_irq_delivery_to_apic() local 68 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_irq_delivery_to_apic() 69 if (!kvm_apic_present(vcpu)) in kvm_irq_delivery_to_apic() 72 if (!kvm_apic_match_dest(vcpu, src, irq->shorthand, in kvm_irq_delivery_to_apic() 79 r += kvm_apic_set_irq(vcpu, irq, dest_map); in kvm_irq_delivery_to_apic() 80 } else if (kvm_lapic_enabled(vcpu)) { in kvm_irq_delivery_to_apic() 82 lowest = vcpu; in kvm_irq_delivery_to_apic() 83 else if (kvm_apic_compare_prio(vcpu, lowest) < 0) in kvm_irq_delivery_to_apic() 84 lowest = vcpu; in kvm_irq_delivery_to_apic() 273 struct kvm_vcpu *vcpu; in kvm_intr_is_single_vcpu() local [all …]
|
D | hyperv.h | 27 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); 28 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 30 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
|
D | ioapic.h | 109 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu); 110 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, 113 void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, 124 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); 125 void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
D | i8259.c | 52 struct kvm_vcpu *vcpu, *found = NULL; in pic_unlock() local 60 kvm_for_each_vcpu(i, vcpu, s->kvm) { in pic_unlock() 61 if (kvm_apic_accept_pic_intr(vcpu)) { in pic_unlock() 62 found = vcpu; in pic_unlock() 279 struct kvm_vcpu *vcpu; in kvm_pic_reset() local 295 kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) in kvm_pic_reset() 296 if (kvm_apic_accept_pic_intr(vcpu)) { in kvm_pic_reset() 532 static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_master_write() argument 539 static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_master_read() argument 546 static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_slave_write() argument [all …]
|
D | trace.h | 222 TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), 223 TP_ARGS(exit_reason, vcpu, isa), 235 __entry->guest_rip = kvm_rip_read(vcpu); 237 kvm_x86_ops->get_exit_info(vcpu, &__entry->info1, 470 __entry->apicid = apic->vcpu->vcpu_id; 487 __entry->apicid = apic->vcpu->vcpu_id; 727 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed), 728 TP_ARGS(vcpu, failed), 740 __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); 741 __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr [all …]
|
D | i8254.c | 257 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) in __kvm_migrate_pit_timer() argument 259 struct kvm_pit *pit = vcpu->kvm->arch.vpit; in __kvm_migrate_pit_timer() 262 if (!kvm_vcpu_is_bsp(vcpu) || !pit) in __kvm_migrate_pit_timer() 282 struct kvm_vcpu *vcpu; in pit_do_work() local 312 kvm_for_each_vcpu(i, vcpu, kvm) in pit_do_work() 313 kvm_apic_nmi_wd_deliver(vcpu); in pit_do_work() 451 static int pit_ioport_write(struct kvm_vcpu *vcpu, in pit_ioport_write() argument 528 static int pit_ioport_read(struct kvm_vcpu *vcpu, in pit_ioport_read() argument 599 static int speaker_ioport_write(struct kvm_vcpu *vcpu, in speaker_ioport_write() argument 617 static int speaker_ioport_read(struct kvm_vcpu *vcpu, in speaker_ioport_read() argument
|
D | mmutrace.h | 249 TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, 251 TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry), 264 __entry->vcpu_id = vcpu->vcpu_id;
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | kvm_ppc.h | 70 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 71 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 74 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); 75 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 78 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 81 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 85 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, 88 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 90 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 93 struct kvm_vcpu *vcpu); [all …]
|
D | kvm_book3s.h | 117 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask); 118 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); 119 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); 120 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); 121 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); 122 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); 123 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); 124 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, 126 extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); 127 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); [all …]
|
D | kvm_booke.h | 37 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) in kvmppc_set_gpr() argument 39 vcpu->arch.gpr[num] = val; in kvmppc_set_gpr() 42 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) in kvmppc_get_gpr() argument 44 return vcpu->arch.gpr[num]; in kvmppc_get_gpr() 47 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_cr() argument 49 vcpu->arch.cr = val; in kvmppc_set_cr() 52 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) in kvmppc_get_cr() argument 54 return vcpu->arch.cr; in kvmppc_get_cr() 57 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val) in kvmppc_set_xer() argument 59 vcpu->arch.xer = val; in kvmppc_set_xer() [all …]
|
D | kvm_host.h | 350 void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs); 351 u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr); 352 u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr); 353 void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr); 354 void (*slbia)(struct kvm_vcpu *vcpu); 356 void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value); 357 u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); 358 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, 360 void (*reset_msr)(struct kvm_vcpu *vcpu); 361 void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); [all …]
|
D | kvm_book3s_32.h | 23 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) in svcpu_get() argument 25 return vcpu->arch.shadow_vcpu; in svcpu_get()
|
D | fsl_hcalls.h | 535 unsigned int vcpu, unsigned int *state) in fh_get_core_state() argument 543 r4 = vcpu; in fh_get_core_state() 565 static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu) in fh_enter_nap() argument 573 r4 = vcpu; in fh_enter_nap() 590 static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu) in fh_exit_nap() argument 598 r4 = vcpu; in fh_exit_nap()
|
D | kvm_book3s_64.h | 24 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) in svcpu_get() argument
|
/linux-4.4.14/arch/arm/kvm/ |
D | psci.c | 44 static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) in kvm_psci_vcpu_suspend() argument 59 kvm_vcpu_block(vcpu); in kvm_psci_vcpu_suspend() 64 static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) in kvm_psci_vcpu_off() argument 66 vcpu->arch.power_off = true; in kvm_psci_vcpu_off() 72 struct kvm_vcpu *vcpu = NULL; in kvm_psci_vcpu_on() local 82 vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id); in kvm_psci_vcpu_on() 88 if (!vcpu) in kvm_psci_vcpu_on() 90 if (!vcpu->arch.power_off) { in kvm_psci_vcpu_on() 100 kvm_reset_vcpu(vcpu); in kvm_psci_vcpu_on() 103 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { in kvm_psci_vcpu_on() [all …]
|
D | emulate.c | 113 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) in vcpu_reg() argument 115 unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; in vcpu_reg() 116 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; in vcpu_reg() 145 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) in vcpu_spsr() argument 147 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; in vcpu_spsr() 150 return &vcpu->arch.regs.KVM_ARM_SVC_spsr; in vcpu_spsr() 152 return &vcpu->arch.regs.KVM_ARM_ABT_spsr; in vcpu_spsr() 154 return &vcpu->arch.regs.KVM_ARM_UND_spsr; in vcpu_spsr() 156 return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; in vcpu_spsr() 158 return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; in vcpu_spsr() [all …]
|
D | arm.c | 64 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) in kvm_arm_set_running_vcpu() argument 67 __this_cpu_write(kvm_arm_running_vcpu, vcpu); in kvm_arm_set_running_vcpu() 93 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument 95 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick() 144 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument 216 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local 228 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create() 229 if (!vcpu) { in kvm_arch_vcpu_create() 234 err = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create() 238 err = create_hyp_mappings(vcpu, vcpu + 1); in kvm_arch_vcpu_create() [all …]
|
D | handle_exit.c | 31 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_svc_hyp() argument 39 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_hvc() argument 43 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), in handle_hvc() 44 kvm_vcpu_hvc_get_imm(vcpu)); in handle_hvc() 46 ret = kvm_psci_call(vcpu); in handle_hvc() 48 kvm_inject_undefined(vcpu); in handle_hvc() 55 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_smc() argument 57 kvm_inject_undefined(vcpu); in handle_smc() 61 static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_pabt_hyp() argument 65 kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); in handle_pabt_hyp() [all …]
|
D | mmio.c | 96 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_mmio_return() argument 109 if (vcpu->arch.mmio_decode.sign_extend && in kvm_handle_mmio_return() 117 data = vcpu_data_host_to_guest(vcpu, data, len); in kvm_handle_mmio_return() 118 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); in kvm_handle_mmio_return() 124 static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) in decode_hsr() argument 130 if (kvm_vcpu_dabt_isextabt(vcpu)) { in decode_hsr() 132 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in decode_hsr() 136 if (kvm_vcpu_dabt_iss1tw(vcpu)) { in decode_hsr() 138 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in decode_hsr() 142 access_size = kvm_vcpu_dabt_get_as(vcpu); in decode_hsr() [all …]
|
D | guest.c | 39 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument 49 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in get_core_reg() argument 52 struct kvm_regs *regs = &vcpu->arch.regs; in get_core_reg() 66 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_core_reg() argument 69 struct kvm_regs *regs = &vcpu->arch.regs; in set_core_reg() 102 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument 107 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument 125 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) in copy_timer_indices() argument 139 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_timer_reg() argument 149 return kvm_arm_timer_set_reg(vcpu, reg->id, val); in set_timer_reg() [all …]
|
D | coproc.c | 53 static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, in vcpu_cp15_reg64_set() argument 57 vcpu->arch.cp15[r->reg] = val & 0xffffffff; in vcpu_cp15_reg64_set() 58 vcpu->arch.cp15[r->reg + 1] = val >> 32; in vcpu_cp15_reg64_set() 61 static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, in vcpu_cp15_reg64_get() argument 66 val = vcpu->arch.cp15[r->reg + 1]; in vcpu_cp15_reg64_get() 68 val = val | vcpu->arch.cp15[r->reg]; in vcpu_cp15_reg64_get() 72 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp10_id() argument 74 kvm_inject_undefined(vcpu); in kvm_handle_cp10_id() 78 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp_0_13_access() argument 84 kvm_inject_undefined(vcpu); in kvm_handle_cp_0_13_access() [all …]
|
D | coproc.h | 71 static inline bool ignore_write(struct kvm_vcpu *vcpu, in ignore_write() argument 77 static inline bool read_zero(struct kvm_vcpu *vcpu, in read_zero() argument 80 *vcpu_reg(vcpu, p->Rt1) = 0; in read_zero() 84 static inline bool write_to_read_only(struct kvm_vcpu *vcpu, in write_to_read_only() argument 88 *vcpu_pc(vcpu)); in write_to_read_only() 93 static inline bool read_from_write_only(struct kvm_vcpu *vcpu, in read_from_write_only() argument 97 *vcpu_pc(vcpu)); in read_from_write_only() 103 static inline void reset_unknown(struct kvm_vcpu *vcpu, in reset_unknown() argument 107 BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); in reset_unknown() 108 vcpu->arch.cp15[r->reg] = 0xdecafbad; in reset_unknown() [all …]
|
D | interrupts_head.S | 15 vcpu .req r0 @ vcpu pointer always in r0 label 147 add r1, vcpu, \offset 167 add r1, vcpu, #VCPU_FIQ_REGS 179 ldr r2, [vcpu, #VCPU_PC] 180 ldr r3, [vcpu, #VCPU_CPSR] 185 ldr r2, [vcpu, #VCPU_USR_SP] 186 ldr r3, [vcpu, #VCPU_USR_LR] 189 add vcpu, vcpu, #(VCPU_USR_REGS) 190 ldm vcpu, {r0-r12} 202 add r2, vcpu, \offset [all …]
|
D | perf.c | 32 struct kvm_vcpu *vcpu; in kvm_is_user_mode() local 34 vcpu = kvm_arm_get_running_vcpu(); in kvm_is_user_mode() 36 if (vcpu) in kvm_is_user_mode() 37 return !vcpu_mode_priv(vcpu); in kvm_is_user_mode() 44 struct kvm_vcpu *vcpu; in kvm_get_guest_ip() local 46 vcpu = kvm_arm_get_running_vcpu(); in kvm_get_guest_ip() 48 if (vcpu) in kvm_get_guest_ip() 49 return *vcpu_pc(vcpu); in kvm_get_guest_ip()
|
D | reset.c | 57 int kvm_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_reset_vcpu() argument 62 switch (vcpu->arch.target) { in kvm_reset_vcpu() 66 vcpu->arch.midr = read_cpuid_id(); in kvm_reset_vcpu() 74 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); in kvm_reset_vcpu() 77 kvm_reset_coprocs(vcpu); in kvm_reset_vcpu() 80 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); in kvm_reset_vcpu()
|
D | mmu.c | 1041 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) in kvm_is_write_fault() argument 1043 if (kvm_vcpu_trap_is_iabt(vcpu)) in kvm_is_write_fault() 1046 return kvm_vcpu_dabt_iswrite(vcpu); in kvm_is_write_fault() 1209 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in coherent_cache_guest_page() argument 1212 __coherent_cache_guest_page(vcpu, pfn, size, uncached); in coherent_cache_guest_page() 1215 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, in user_mem_abort() argument 1223 struct kvm *kvm = vcpu->kvm; in user_mem_abort() 1224 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in user_mem_abort() 1232 write_fault = kvm_is_write_fault(vcpu); in user_mem_abort() 1272 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort() [all …]
|
D | interrupts.S | 107 @ Save the vcpu pointer 108 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR 177 add r7, vcpu, #VCPU_VFP_GUEST 179 add r7, vcpu, #VCPU_VFP_HOST 291 str r2, [vcpu, #VCPU_HSR] 292 str r1, [vcpu, #VCPU_HxFAR] 297 str r2, [vcpu, #VCPU_HSR] 298 str r1, [vcpu, #VCPU_HxFAR] 407 str r1, [vcpu, #VCPU_HSR] 422 2: str r2, [vcpu, #VCPU_HxFAR]
|
/linux-4.4.14/arch/mips/kvm/ |
D | mips.c | 63 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_mips_reset_vcpu() argument 68 vcpu->arch.guest_kernel_asid[i] = 0; in kvm_mips_reset_vcpu() 69 vcpu->arch.guest_user_asid[i] = 0; in kvm_mips_reset_vcpu() 79 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument 81 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable() 84 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument 144 struct kvm_vcpu *vcpu; in kvm_mips_free_vcpus() local 153 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mips_free_vcpus() 154 kvm_arch_vcpu_free(vcpu); in kvm_mips_free_vcpus() 252 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); in kvm_arch_vcpu_create() local [all …]
|
D | emulate.c | 42 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, in kvm_compute_return_epc() argument 47 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_compute_return_epc() 55 insn.word = kvm_get_inst((uint32_t *) epc, vcpu); in kvm_compute_return_epc() 201 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) in update_pc() argument 207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); in update_pc() 211 vcpu->arch.pc = branch_pc; in update_pc() 213 vcpu->arch.pc); in update_pc() 216 vcpu->arch.pc += 4; in update_pc() 218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); in update_pc() 231 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) in kvm_mips_count_disabled() argument [all …]
|
D | trap_emul.c | 40 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) in kvm_trap_emul_handle_cop_unusable() argument 42 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_trap_emul_handle_cop_unusable() 43 struct kvm_run *run = vcpu->run; in kvm_trap_emul_handle_cop_unusable() 44 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; in kvm_trap_emul_handle_cop_unusable() 45 unsigned long cause = vcpu->arch.host_cp0_cause; in kvm_trap_emul_handle_cop_unusable() 51 if (!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_emul_handle_cop_unusable() 57 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); in kvm_trap_emul_handle_cop_unusable() 60 kvm_own_fpu(vcpu); in kvm_trap_emul_handle_cop_unusable() 64 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); in kvm_trap_emul_handle_cop_unusable() 88 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) in kvm_trap_emul_handle_tlb_mod() argument [all …]
|
D | interrupt.c | 25 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) in kvm_mips_queue_irq() argument 27 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_mips_queue_irq() 30 void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) in kvm_mips_dequeue_irq() argument 32 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_mips_dequeue_irq() 35 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) in kvm_mips_queue_timer_int_cb() argument 42 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); in kvm_mips_queue_timer_int_cb() 45 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); in kvm_mips_queue_timer_int_cb() 49 void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) in kvm_mips_dequeue_timer_int_cb() argument 51 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); in kvm_mips_dequeue_timer_int_cb() 52 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); in kvm_mips_dequeue_timer_int_cb() [all …]
|
D | tlb.c | 50 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_kernel_asid() argument 52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_kernel_asid() 55 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_user_asid() argument 57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_user_asid() 60 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_commpage_asid() argument 62 return vcpu->kvm->arch.commpage_tlb; in kvm_mips_get_commpage_asid() 116 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) in kvm_mips_dump_guest_tlbs() argument 118 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_dump_guest_tlbs() 126 tlb = vcpu->arch.guest_tlb[i]; in kvm_mips_dump_guest_tlbs() 168 unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, in kvm_mips_translate_guest_kseg0_to_hpa() argument [all …]
|
D | interrupt.h | 39 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority); 40 void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority); 41 int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); 43 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); 44 void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); 45 void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, 47 void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 49 int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 51 int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 53 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
|
D | dyntrans.c | 32 struct kvm_vcpu *vcpu) in kvm_mips_trans_cache_index() argument 41 (vcpu, (unsigned long) opc)); in kvm_mips_trans_cache_index() 53 struct kvm_vcpu *vcpu) in kvm_mips_trans_cache_va() argument 66 (vcpu, (unsigned long) opc)); in kvm_mips_trans_cache_va() 73 int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) in kvm_mips_trans_mfc0() argument 98 (vcpu, (unsigned long) opc)); in kvm_mips_trans_mfc0() 115 int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) in kvm_mips_trans_mtc0() argument 133 (vcpu, (unsigned long) opc)); in kvm_mips_trans_mtc0()
|
D | stats.c | 70 void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) in kvm_mips_dump_stats() argument 75 kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); in kvm_mips_dump_stats() 78 if (vcpu->arch.cop0->stat[i][j]) in kvm_mips_dump_stats() 80 vcpu->arch.cop0->stat[i][j]); in kvm_mips_dump_stats()
|
D | commpage.c | 27 void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) in kvm_mips_commpage_init() argument 29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; in kvm_mips_commpage_init() 32 vcpu->arch.cop0 = &page->cop0; in kvm_mips_commpage_init()
|
D | trace.h | 24 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), 25 TP_ARGS(vcpu, reason), 32 __entry->pc = vcpu->arch.pc;
|
D | commpage.h | 22 extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
|
/linux-4.4.14/virt/kvm/ |
D | async_pf.c | 31 static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu, in kvm_async_page_present_sync() argument 35 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_sync() 38 static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu, in kvm_async_page_present_async() argument 42 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_async() 65 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu) in kvm_async_pf_vcpu_init() argument 67 INIT_LIST_HEAD(&vcpu->async_pf.done); in kvm_async_pf_vcpu_init() 68 INIT_LIST_HEAD(&vcpu->async_pf.queue); in kvm_async_pf_vcpu_init() 69 spin_lock_init(&vcpu->async_pf.lock); in kvm_async_pf_vcpu_init() 77 struct kvm_vcpu *vcpu = apf->vcpu; in async_pf_execute() local 84 kvm_async_page_present_sync(vcpu, apf); in async_pf_execute() [all …]
|
D | kvm_main.c | 133 int vcpu_load(struct kvm_vcpu *vcpu) in vcpu_load() argument 137 if (mutex_lock_killable(&vcpu->mutex)) in vcpu_load() 140 preempt_notifier_register(&vcpu->preempt_notifier); in vcpu_load() 141 kvm_arch_vcpu_load(vcpu, cpu); in vcpu_load() 146 void vcpu_put(struct kvm_vcpu *vcpu) in vcpu_put() argument 149 kvm_arch_vcpu_put(vcpu); in vcpu_put() 150 preempt_notifier_unregister(&vcpu->preempt_notifier); in vcpu_put() 152 mutex_unlock(&vcpu->mutex); in vcpu_put() 164 struct kvm_vcpu *vcpu; in kvm_make_all_cpus_request() local 169 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request() [all …]
|
D | async_pf.h | 29 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
|
D | coalesced_mmio.c | 63 static int coalesced_mmio_write(struct kvm_vcpu *vcpu, in coalesced_mmio_write() argument
|
D | eventfd.c | 724 ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, in ioeventfd_write() argument
|
/linux-4.4.14/arch/x86/include/asm/ |
D | kvm_host.h | 270 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); 271 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu); 272 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index); 273 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, 275 void (*inject_page_fault)(struct kvm_vcpu *vcpu, 277 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, 279 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, 281 int (*sync_page)(struct kvm_vcpu *vcpu, 283 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva); 284 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, [all …]
|
D | pvclock.h | 13 struct pvclock_vcpu_time_info *vcpu,
|
/linux-4.4.14/virt/kvm/arm/ |
D | arch_timer.c | 66 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; in kvm_arch_timer_handler() local 74 pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); in kvm_arch_timer_handler() 84 struct kvm_vcpu *vcpu; in kvm_timer_inject_irq_work() local 86 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); in kvm_timer_inject_irq_work() 87 vcpu->arch.timer_cpu.armed = false; in kvm_timer_inject_irq_work() 89 WARN_ON(!kvm_timer_should_fire(vcpu)); in kvm_timer_inject_irq_work() 95 kvm_vcpu_kick(vcpu); in kvm_timer_inject_irq_work() 98 static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu) in kvm_timer_compute_delta() argument 102 cval = vcpu->arch.timer_cpu.cntv_cval; in kvm_timer_compute_delta() 103 now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; in kvm_timer_compute_delta() [all …]
|
D | vgic.c | 107 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); 108 static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu); 109 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); 110 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); 111 static u64 vgic_get_elrsr(struct kvm_vcpu *vcpu); 112 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu, 114 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu); 119 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) in add_sgi_source() argument 121 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); in add_sgi_source() 124 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq) in queue_sgi() argument [all …]
|
D | vgic-v2-emul.c | 37 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); 43 static bool handle_mmio_misc(struct kvm_vcpu *vcpu, in handle_mmio_misc() argument 51 reg = vcpu->kvm->arch.vgic.enabled; in handle_mmio_misc() 55 vcpu->kvm->arch.vgic.enabled = reg & 1; in handle_mmio_misc() 56 vgic_update_state(vcpu->kvm); in handle_mmio_misc() 62 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; in handle_mmio_misc() 63 reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; in handle_mmio_misc() 78 static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, in handle_mmio_set_enable_reg() argument 82 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_enable_reg() 83 vcpu->vcpu_id, ACCESS_WRITE_SETBIT); in handle_mmio_set_enable_reg() [all …]
|
D | vgic-v3-emul.c | 52 static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu, in handle_mmio_rao_wi() argument 63 static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu, in handle_mmio_ctlr() argument 72 if (vcpu->kvm->arch.vgic.enabled) in handle_mmio_ctlr() 79 vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1); in handle_mmio_ctlr() 80 vgic_update_state(vcpu->kvm); in handle_mmio_ctlr() 93 static bool handle_mmio_typer(struct kvm_vcpu *vcpu, in handle_mmio_typer() argument 98 reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1; in handle_mmio_typer() 108 static bool handle_mmio_iidr(struct kvm_vcpu *vcpu, in handle_mmio_iidr() argument 120 static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu, in handle_mmio_set_enable_reg_dist() argument 125 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_enable_reg_dist() [all …]
|
D | vgic-v3.c | 47 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_v3_get_lr() argument 50 u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; in vgic_v3_get_lr() 52 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) in vgic_v3_get_lr() 59 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) in vgic_v3_get_lr() 78 static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_v3_set_lr() argument 91 switch (vcpu->kvm->arch.vgic.vgic_model) { in vgic_v3_set_lr() 114 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; in vgic_v3_set_lr() 117 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); in vgic_v3_set_lr() 119 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); in vgic_v3_set_lr() 122 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) in vgic_v3_get_elrsr() argument [all …]
|
D | vgic-v2.c | 33 static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_v2_get_lr() argument 36 u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; in vgic_v2_get_lr() 59 static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_v2_set_lr() argument 81 vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; in vgic_v2_set_lr() 84 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); in vgic_v2_set_lr() 86 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); in vgic_v2_set_lr() 89 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) in vgic_v2_get_elrsr() argument 91 return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; in vgic_v2_get_elrsr() 94 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) in vgic_v2_get_eisr() argument 96 return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; in vgic_v2_get_eisr() [all …]
|
D | vgic.h | 50 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq); 51 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq); 52 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq); 56 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 57 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 59 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); 60 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); 72 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, 91 bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
|
/linux-4.4.14/arch/mips/include/asm/ |
D | kvm_host.h | 71 #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ER… argument 72 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) 576 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_can_have_fpu() argument 579 vcpu->fpu_enabled; in kvm_mips_guest_can_have_fpu() 582 static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_has_fpu() argument 584 return kvm_mips_guest_can_have_fpu(vcpu) && in kvm_mips_guest_has_fpu() 585 kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; in kvm_mips_guest_has_fpu() 588 static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_can_have_msa() argument 591 vcpu->msa_enabled; in kvm_mips_guest_can_have_msa() 594 static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_has_msa() argument [all …]
|
/linux-4.4.14/include/linux/ |
D | kvm_host.h | 177 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 179 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, 181 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 193 struct kvm_vcpu *vcpu; member 201 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); 202 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); 203 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, 205 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 287 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) in kvm_vcpu_exiting_guest_mode() argument 289 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); in kvm_vcpu_exiting_guest_mode() [all …]
|
/linux-4.4.14/arch/x86/xen/ |
D | irq.c | 28 struct vcpu_info *vcpu; in xen_save_fl() local 31 vcpu = this_cpu_read(xen_vcpu); in xen_save_fl() 34 flags = !vcpu->evtchn_upcall_mask; in xen_save_fl() 46 struct vcpu_info *vcpu; in xen_restore_fl() local 53 vcpu = this_cpu_read(xen_vcpu); in xen_restore_fl() 54 vcpu->evtchn_upcall_mask = flags; in xen_restore_fl() 58 if (unlikely(vcpu->evtchn_upcall_pending)) in xen_restore_fl() 79 struct vcpu_info *vcpu; in xen_irq_enable() local 88 vcpu = this_cpu_read(xen_vcpu); in xen_irq_enable() 89 vcpu->evtchn_upcall_mask = 0; in xen_irq_enable() [all …]
|
D | xen-ops.h | 77 bool xen_vcpu_stolen(int vcpu);
|
D | time.c | 99 bool xen_vcpu_stolen(int vcpu) in xen_vcpu_stolen() argument 101 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; in xen_vcpu_stolen()
|
D | pmu.c | 532 xp.vcpu = cpu; in xen_pmu_init() 562 xp.vcpu = cpu; in xen_pmu_finish()
|
/linux-4.4.14/include/kvm/ |
D | arm_vgic.h | 115 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); 116 u64 (*get_eisr)(const struct kvm_vcpu *vcpu); 117 void (*clear_eisr)(struct kvm_vcpu *vcpu); 118 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); 119 void (*enable_underflow)(struct kvm_vcpu *vcpu); 120 void (*disable_underflow)(struct kvm_vcpu *vcpu); 121 void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 122 void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 123 void (*enable)(struct kvm_vcpu *vcpu); 335 void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); [all …]
|
D | arm_arch_timer.h | 63 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, 65 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); 66 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); 67 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); 68 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); 73 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu); 74 void kvm_timer_schedule(struct kvm_vcpu *vcpu); 75 void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
|
D | iodev.h | 30 int (*read)(struct kvm_vcpu *vcpu, 35 int (*write)(struct kvm_vcpu *vcpu, 54 static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu, in kvm_iodevice_read() argument 58 return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v) in kvm_iodevice_read() 62 static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu, in kvm_iodevice_write() argument 66 return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v) in kvm_iodevice_write()
|
/linux-4.4.14/tools/perf/ |
D | builtin-kvm.c | 142 event->vcpu[j].time = 0; in clear_events_cache_stats() 143 init_stats(&event->vcpu[j].stats); in clear_events_cache_stats() 166 prev = event->vcpu; in kvm_event_expand() 167 event->vcpu = realloc(event->vcpu, in kvm_event_expand() 168 event->max_vcpu * sizeof(*event->vcpu)); in kvm_event_expand() 169 if (!event->vcpu) { in kvm_event_expand() 175 memset(event->vcpu + old_max_vcpu, 0, in kvm_event_expand() 176 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); in kvm_event_expand() 243 kvm_stats = &event->vcpu[vcpu_id]; in kvm_event_rel_stddev() 260 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); in update_kvm_event() [all …]
|
/linux-4.4.14/arch/powerpc/perf/ |
D | hv-24x7-domains.h | 25 DOMAIN(VCPU_HOME_CORE, 0x03, vcpu, false) 26 DOMAIN(VCPU_HOME_CHIP, 0x04, vcpu, false) 27 DOMAIN(VCPU_HOME_NODE, 0x05, vcpu, false) 28 DOMAIN(VCPU_REMOTE_NODE, 0x06, vcpu, false)
|
D | hv-24x7.c | 104 EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
|
/linux-4.4.14/arch/s390/include/asm/ |
D | kvm_host.h | 484 #define guestdbg_enabled(vcpu) \ argument 485 (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) 486 #define guestdbg_sstep_enabled(vcpu) \ argument 487 (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 488 #define guestdbg_hw_bp_enabled(vcpu) \ argument 489 (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) 490 #define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \ argument 491 (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING)) 621 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu); 623 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, [all …]
|
/linux-4.4.14/include/xen/interface/ |
D | event_channel.h | 63 uint32_t vcpu; member 92 uint32_t vcpu; member 139 uint32_t vcpu; /* VCPU to which this channel is bound. */ member 167 uint32_t vcpu; member 201 uint32_t vcpu; member
|
D | xenpmu.h | 38 uint32_t vcpu; member
|
/linux-4.4.14/Documentation/virtual/kvm/ |
D | api.txt | 21 - vcpu ioctls: These query and set attributes that control the operation 24 Only run vcpu ioctls from the same thread that was used to create the 25 vcpu. 36 and return a file descriptor pointing to it. Finally, ioctls on a vcpu 37 fd can be used to control the vcpu, including the important task of 45 and one vcpu per thread. 81 Type: system, vm, or vcpu. 176 Returns: size of vcpu mmap area, in bytes 199 Parameters: vcpu id (apic id on x86) 200 Returns: vcpu fd on success, -1 on error [all …]
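The api.txt fragments above sketch the three-level fd hierarchy (system, vm, vcpu) and the rule that vcpu ioctls must be issued from the thread that created the vcpu. A minimal user-space sketch of that flow follows; the ioctl names are the real KVM UAPI, but the surrounding program is illustrative only and omits all error handling (a real VMM would also set up guest memory and registers before KVM_RUN):

	#include <fcntl.h>
	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	int main(void)
	{
		int kvm  = open("/dev/kvm", O_RDWR);       /* system fd */
		int vm   = ioctl(kvm, KVM_CREATE_VM, 0);   /* vm fd */
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);  /* vcpu fd, id 0 */

		/* Size of the per-vcpu mmap area holding struct kvm_run. */
		int len = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
		struct kvm_run *run = mmap(NULL, len, PROT_READ | PROT_WRITE,
					   MAP_SHARED, vcpu, 0);

		/* Per the rule quoted above, KVM_RUN and all further vcpu
		 * ioctls must be issued from this same thread. */
		ioctl(vcpu, KVM_RUN, 0);
		return run->exit_reason;
	}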
|
D | hypercalls.txt | 75 Purpose: Hypercall used to wakeup a vcpu from HLT state 76 Usage example : A vcpu of a paravirtualized guest that is busywaiting in guest 80 the vcpu to sleep until occurrence of an appropriate event. Another vcpu of the 81 same guest can wakeup the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall, 82 specifying APIC ID (a1) of the vcpu to be woken up. An additional argument (a0)
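The hypercalls.txt lines above describe KVM_HC_KICK_CPU: a vcpu that gives up busy-waiting executes HLT, and a sibling vcpu later wakes it by hypercall, passing the sleeper's APIC ID in a1 and flags in a0. On the kick side this boils down to a single kvm_hypercall2(); the helper below is an illustrative paraphrase of the 4.4 guest code, not a verbatim copy:

	#include <linux/kvm_para.h>	/* kvm_hypercall2(), KVM_HC_KICK_CPU */

	/* Wake the halted vcpu whose APIC ID is @apicid; a0 carries
	 * flags and is currently 0. */
	static void kick_vcpu(int apicid)
	{
		kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
	}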
|
D | msr.txt | 120 | | guest vcpu has been paused by 170 when asynchronous page faults are enabled on the vcpu 0 when 172 when vcpu is in cpl == 0. 191 kind of token 0xffffffff which tells vcpu that it should wake 198 Currently type 2 APF will be always delivered on the same vcpu as 232 nanoseconds. Time during which the vcpu is idle, will not be 236 data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
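Several of the msr.txt lines above concern asynchronous page faults: the guest hands KVM the physical address of a per-vcpu "reason" word with the enable bit set, and the special token 0xffffffff later tells the vcpu that a faulted-in page is ready. The enable step in the 4.4 guest is essentially the following, an illustrative paraphrase of kvm_guest_cpu_init() in arch/x86/kernel/kvm.c (the KVM_ASYNC_PF_SEND_ALWAYS handling for preemptible kernels is elided):

	#include <linux/kvm_para.h>	/* MSR_KVM_ASYNC_PF_EN, KVM_ASYNC_PF_ENABLED */
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

	static void enable_async_pf(void)
	{
		/* Physical address of this vcpu's apf_reason word, with the
		 * enable bit set; KVM writes fault tokens into that word. */
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
	}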
|
D | mmu.txt | 17 - scaling: need to scale to large memory and large vcpu guests 294 - cache the information to vcpu->arch.mmio_gva, vcpu->arch.access and 295 vcpu->arch.mmio_gfn, and call the emulator 304 vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn
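The mmu.txt lines above describe the MMIO fast path: the first emulated access caches its virtual address, access rights and frame number in vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn, so a repeat access can skip the page-table walk. The cache hit test is essentially the following, a sketch after vcpu_match_mmio_gva() in arch/x86/kvm/x86.h with the memslot-generation check elided:

	static bool mmio_gva_cached(struct kvm_vcpu *vcpu, unsigned long gva)
	{
		return vcpu->arch.mmio_gva &&
		       vcpu->arch.mmio_gva == (gva & PAGE_MASK);
	}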
|
D | locking.txt | 56 mark_page_dirty(vcpu->kvm, gfn1) 167 The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
|
/linux-4.4.14/Documentation/virtual/kvm/devices/ |
D | vm.txt | 17 Returns: -EBUSY if a vcpu is already defined, otherwise 0 33 -EBUSY if a vcpu is already defined 65 Allows user space to retrieve or request to change cpu related information for a vcpu: 68 __u64 cpuid; # CPUID currently (to be) used by this vcpu 69 __u16 ibc; # IBC level currently (to be) used by this vcpu 72 # by this vcpu
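The vm.txt excerpt above gives the per-field meaning of the s390 cpu model attribute (cpuid, ibc, facility mask). Laid out as the structure those comments annotate, it is approximately the following; the field names follow the doc, while the padding and array size are recalled from the 4.4 uapi header and should be treated as an assumption:

	struct kvm_s390_vm_cpu_processor {
		__u64 cpuid;		/* CPUID currently (to be) used by this vcpu */
		__u16 ibc;		/* IBC level currently (to be) used by this vcpu */
		__u8  pad[6];
		__u64 fac_list[256];	/* facility list mask used by this vcpu */
	};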
|
D | xics.txt | 16 capability for each vcpu, specifying KVM_CAP_IRQ_XICS in args[0] and 17 the interrupt server number (i.e. the vcpu number from the XICS's 20 KVM_GET_ONE_REG and KVM_SET_ONE_REG ioctls on the vcpu. The 64 bit 47 interrupt server number specified for the destination vcpu.
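Connecting a vcpu to the in-kernel XICS, as the lines above describe, is a KVM_ENABLE_CAP vcpu ioctl carrying the interrupt server number. A hedged sketch follows; in the 4.4 implementation kvm_arch_vcpu_ioctl_enable_cap() consumes args[0] as the XICS device fd, but treat the exact argument layout as an assumption and check the full xics.txt. xics_device_fd, server_num and vcpu_fd are placeholder variables:

	struct kvm_enable_cap cap = {
		.cap     = KVM_CAP_IRQ_XICS,
		.args[0] = xics_device_fd,	/* fd from KVM_CREATE_DEVICE */
		.args[1] = server_num,		/* vcpu number, XICS's view */
	};
	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);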
|
D | mpic.txt | 10 vcpu's interrupt inputs.
|
/linux-4.4.14/tools/perf/Documentation/ |
D | perf-kvm.txt | 102 --vcpu=<value>:: 103 analyze events which occur on this vcpu. (default: all vcpus) 136 --vcpu=<value>:: 137 analyze events which occur on this vcpu. (default: all vcpus)
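As the option text above says, --vcpu narrows perf kvm's report and live modes to a single vcpu; for example, "perf kvm stat report --vcpu=3" reports only events that occurred on vcpu 3 (the vcpu id here is illustrative).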
|
/linux-4.4.14/Documentation/DocBook/ |
D | genericirq.xml.db | 24 API-irq-set-vcpu-affinity 70 API-irq-chip-set-vcpu-affinity-parent
|
D | kernel-api.xml.db | 366 API-irq-set-vcpu-affinity
|
/linux-4.4.14/tools/perf/util/ |
D | kvm-stat.h | 33 struct kvm_event_stats *vcpu; member
|
/linux-4.4.14/drivers/xen/events/ |
D | events_base.c | 898 bind_ipi.vcpu = cpu; in bind_ipi_to_irq() 951 if (status.u.virq == virq && status.vcpu == cpu) { in find_virq() 994 bind_virq.vcpu = cpu; in bind_virq_to_irq() 1321 bind_vcpu.vcpu = tcpu; in rebind_irq_to_cpu() 1461 bind_virq.vcpu = cpu; in restore_cpu_virqs() 1485 bind_ipi.vcpu = cpu; in restore_cpu_ipis()
|
D | events_fifo.c | 116 init_control.vcpu = cpu; in init_control_block()
|
/linux-4.4.14/drivers/iommu/ |
D | fsl_pamu.h | 405 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
|
D | fsl_pamu.c | 519 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) in get_stash_id() argument 547 if (be32_to_cpup(&prop[i]) == vcpu) { in get_stash_id() 587 stash_dest_hint, vcpu); in get_stash_id()
|
/linux-4.4.14/drivers/irqchip/ |
D | irq-gic-v3.c | 313 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) in gic_irq_set_vcpu_affinity() argument 315 if (vcpu) in gic_irq_set_vcpu_affinity()
|
D | irq-gic.c | 288 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) in gic_irq_set_vcpu_affinity() argument 294 if (vcpu) in gic_irq_set_vcpu_affinity()
|
/linux-4.4.14/drivers/xen/ |
D | evtchn.c | 451 bind_virq.vcpu = 0; in evtchn_ioctl()
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | stih416-clock.dtsi | 654 "clk-m-fvdp-vcpu",
|
/linux-4.4.14/Documentation/RCU/ |
D | RTFP.txt | 2436 ,Title="[{PATCH} 37/40] {KVM}: Bump maximum vcpu count to 64"
|