
Searched refs:vcpu (Results 1 – 178 of 178) sorted by relevance

/linux-4.1.27/arch/arm64/include/asm/
kvm_emulate.h
34 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
35 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
37 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
38 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
40 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
41 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
42 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
44 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) in vcpu_reset_hcr() argument
46 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; in vcpu_reset_hcr()
47 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) in vcpu_reset_hcr()
[all …]
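
vcpu_reset_hcr() above seeds the vcpu's HCR_EL2 with the default guest flags and then drops the RW bit when the guest was configured for 32-bit EL1. A minimal standalone sketch of that pattern (the flag values and bit positions below are illustrative placeholders, not the kernel's constants):

    #include <stdio.h>
    #include <stdint.h>

    #define HCR_RW          (1ULL << 31)         /* EL1 is AArch64 when set */
    #define HCR_GUEST_FLAGS (HCR_RW | 0x3866ULL) /* illustrative defaults   */
    #define VCPU_EL1_32BIT  0                    /* feature-bit index       */

    struct vcpu {
        uint64_t hcr_el2;
        unsigned long features; /* feature bitmap */
    };

    /* Reset HCR_EL2 to defaults, then honour the 32-bit-EL1 feature. */
    static void vcpu_reset_hcr(struct vcpu *v)
    {
        v->hcr_el2 = HCR_GUEST_FLAGS;
        if (v->features & (1UL << VCPU_EL1_32BIT))
            v->hcr_el2 &= ~HCR_RW; /* run EL1 in AArch32 */
    }

    int main(void)
    {
        struct vcpu v = { .features = 1UL << VCPU_EL1_32BIT };
        vcpu_reset_hcr(&v);
        printf("hcr_el2 = %#llx\n", (unsigned long long)v.hcr_el2);
        return 0;
    }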
kvm_coproc.h
26 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
41 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
42 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
43 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
44 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
45 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
46 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
52 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
53 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
54 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
[all …]
kvm_host.h
49 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
172 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
173 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
174 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
175 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
198 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
250 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} in kvm_arch_vcpu_uninit() argument
251 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} in kvm_arch_sched_in() argument
kvm_mmu.h
94 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
96 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
228 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
230 return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
233 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
239 if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
271 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
272 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
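
The 0b101 mask in vcpu_has_cache_enabled() covers SCTLR_EL1 bit 0 (M, stage-1 MMU enable) and bit 2 (C, data cache enable); the guest only counts as running with caches on when both are set. A standalone illustration of the same test:

    #include <stdio.h>
    #include <stdint.h>

    #define SCTLR_M (1u << 0) /* stage-1 MMU enable */
    #define SCTLR_C (1u << 2) /* data cache enable  */

    /* Same test as (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101. */
    static int cache_enabled(uint64_t sctlr)
    {
        return (sctlr & (SCTLR_M | SCTLR_C)) == (SCTLR_M | SCTLR_C);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               cache_enabled(0),                  /* 0: both off        */
               cache_enabled(SCTLR_M),            /* 0: cache still off */
               cache_enabled(SCTLR_M | SCTLR_C)); /* 1: both on         */
        return 0;
    }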
kvm_psci.h
24 int kvm_psci_version(struct kvm_vcpu *vcpu);
25 int kvm_psci_call(struct kvm_vcpu *vcpu);
kvm_mmio.h
34 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
35 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
kvm_asm.h
131 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
/linux-4.1.27/arch/powerpc/kvm/
booke_emulate.c
35 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfi() argument
37 vcpu->arch.pc = vcpu->arch.shared->srr0; in kvmppc_emul_rfi()
38 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); in kvmppc_emul_rfi()
41 static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfdi() argument
43 vcpu->arch.pc = vcpu->arch.dsrr0; in kvmppc_emul_rfdi()
44 kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); in kvmppc_emul_rfdi()
47 static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) in kvmppc_emul_rfci() argument
49 vcpu->arch.pc = vcpu->arch.csrr0; in kvmppc_emul_rfci()
50 kvmppc_set_msr(vcpu, vcpu->arch.csrr1); in kvmppc_emul_rfci()
53 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_booke_emulate_op() argument
[all …]
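
The three return-from-interrupt helpers above differ only in which save/restore pair they restore pc and msr from: SRR for ordinary interrupts, DSRR for debug, CSRR for critical. A condensed standalone model of that pattern (the struct is a stand-in for vcpu->arch):

    #include <stdio.h>

    struct vcpu_arch {
        unsigned long pc, msr;
        unsigned long srr0, srr1;   /* normal interrupt save/restore   */
        unsigned long dsrr0, dsrr1; /* debug interrupt save/restore    */
        unsigned long csrr0, csrr1; /* critical interrupt save/restore */
    };

    /* Return-from-interrupt: resume at the saved PC with the saved MSR. */
    static void emul_rfi(struct vcpu_arch *a)  { a->pc = a->srr0;  a->msr = a->srr1;  }
    static void emul_rfdi(struct vcpu_arch *a) { a->pc = a->dsrr0; a->msr = a->dsrr1; }
    static void emul_rfci(struct vcpu_arch *a) { a->pc = a->csrr0; a->msr = a->csrr1; }

    int main(void)
    {
        struct vcpu_arch a = { .srr0 = 0x100, .srr1 = 0x8000 };
        emul_rfi(&a);
        printf("pc=%#lx msr=%#lx\n", a.pc, a.msr);
        return 0;
    }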
booke.c
74 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) in kvmppc_dump_vcpu() argument
78 printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); in kvmppc_dump_vcpu()
79 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); in kvmppc_dump_vcpu()
80 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, in kvmppc_dump_vcpu()
81 vcpu->arch.shared->srr1); in kvmppc_dump_vcpu()
83 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvmppc_dump_vcpu()
87 kvmppc_get_gpr(vcpu, i), in kvmppc_dump_vcpu()
88 kvmppc_get_gpr(vcpu, i+1), in kvmppc_dump_vcpu()
89 kvmppc_get_gpr(vcpu, i+2), in kvmppc_dump_vcpu()
90 kvmppc_get_gpr(vcpu, i+3)); in kvmppc_dump_vcpu()
[all …]
book3s_pr.c
54 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56 static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
65 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
67 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
71 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
73 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
74 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
81 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
88 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
89 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
[all …]
emulate_loadstore.c
50 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_loadstore() argument
52 struct kvm_run *run = vcpu->run; in kvmppc_emulate_loadstore()
59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); in kvmppc_emulate_loadstore()
61 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); in kvmppc_emulate_loadstore()
73 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); in kvmppc_emulate_loadstore()
77 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); in kvmppc_emulate_loadstore()
81 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); in kvmppc_emulate_loadstore()
82 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); in kvmppc_emulate_loadstore()
86 emulated = kvmppc_handle_store(run, vcpu, in kvmppc_emulate_loadstore()
87 kvmppc_get_gpr(vcpu, rs), in kvmppc_emulate_loadstore()
[all …]
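
kvmppc_emulate_loadstore() has the usual fetch/decode/dispatch shape: read back the faulting instruction, pull out the register fields, and route to a sized load or store helper (the update forms also write the effective address back to ra). A schematic standalone version (the opcode values and helpers are invented for illustration, not real PowerPC encodings):

    #include <stdio.h>

    /* Illustrative opcodes only; real PPC major opcodes differ. */
    enum { OP_LWZ = 1, OP_LBZ = 2, OP_STW = 3 };

    struct insn { int op, rt, ra; };

    static void handle_load(int rt, int bytes)  { printf("load  r%d, %d bytes\n", rt, bytes); }
    static void handle_store(int rs, int bytes) { printf("store r%d, %d bytes\n", rs, bytes); }

    /* Decode one guest load/store and dispatch to a sized helper. */
    static void emulate_loadstore(const struct insn *i)
    {
        switch (i->op) {
        case OP_LWZ: handle_load(i->rt, 4);  break; /* 32-bit load  */
        case OP_LBZ: handle_load(i->rt, 1);  break; /* 8-bit load   */
        case OP_STW: handle_store(i->rt, 4); break; /* 32-bit store */
        default:     printf("unhandled op %d\n", i->op); break;
        }
    }

    int main(void)
    {
        struct insn i = { OP_LWZ, 3, 0 };
        emulate_loadstore(&i);
        return 0;
    }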
book3s.c
68 void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_unfixup_split_real() argument
70 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { in kvmppc_unfixup_split_real()
71 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_unfixup_split_real()
73 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
74 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_unfixup_split_real()
79 static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) in kvmppc_interrupt_offset() argument
81 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_interrupt_offset()
82 return to_book3s(vcpu)->hior; in kvmppc_interrupt_offset()
86 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, in kvmppc_update_int_pending() argument
89 if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_update_int_pending()
[all …]
book3s_emulate.c
76 static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) in spr_allowed() argument
79 if (vcpu->arch.papr_enabled && (level > PRIV_SUPER)) in spr_allowed()
83 if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) in spr_allowed()
89 int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_core_emulate_op_pr() argument
102 if ((kvmppc_get_msr(vcpu) & MSR_LE) && in kvmppc_core_emulate_op_pr()
111 kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED); in kvmppc_core_emulate_op_pr()
112 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); in kvmppc_core_emulate_op_pr()
120 kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); in kvmppc_core_emulate_op_pr()
121 kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); in kvmppc_core_emulate_op_pr()
133 kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); in kvmppc_core_emulate_op_pr()
[all …]
book3s_paired_singles.c
161 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) in kvmppc_sync_qpr() argument
163 kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); in kvmppc_sync_qpr()
166 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) in kvmppc_inject_pf() argument
169 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_inject_pf()
173 kvmppc_set_msr(vcpu, msr); in kvmppc_inject_pf()
174 kvmppc_set_dar(vcpu, eaddr); in kvmppc_inject_pf()
179 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_inject_pf()
180 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); in kvmppc_inject_pf()
183 static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_emulate_fpr_load() argument
195 r = kvmppc_ld(vcpu, &addr, len, tmp, true); in kvmppc_emulate_fpr_load()
[all …]
emulate.c
37 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) in kvmppc_emulate_dec() argument
42 pr_debug("mtDEC: %x\n", vcpu->arch.dec); in kvmppc_emulate_dec()
43 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_emulate_dec()
47 kvmppc_core_dequeue_dec(vcpu); in kvmppc_emulate_dec()
50 if (vcpu->arch.dec & 0x80000000) { in kvmppc_emulate_dec()
51 kvmppc_core_queue_dec(vcpu); in kvmppc_emulate_dec()
58 if (vcpu->arch.dec == 0) in kvmppc_emulate_dec()
68 dec_time = vcpu->arch.dec; in kvmppc_emulate_dec()
76 hrtimer_start(&vcpu->arch.dec_timer, in kvmppc_emulate_dec()
78 vcpu->arch.dec_jiffies = get_tb(); in kvmppc_emulate_dec()
[all …]
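
kvmppc_emulate_dec() cancels any armed timer, treats a decrementer value with the top bit set as already pending, and otherwise converts the remaining ticks into a host timer deadline. A sketch of the tick-to-nanosecond conversion under an assumed timebase frequency (the 512 MHz figure is an example, not a kernel constant):

    #include <stdio.h>
    #include <stdint.h>

    #define TB_HZ 512000000ULL /* assumed guest timebase frequency */

    /* Convert decrementer ticks to a nanosecond delay for a host timer. */
    static uint64_t dec_to_ns(uint32_t dec)
    {
        return (uint64_t)dec * 1000000000ULL / TB_HZ;
    }

    int main(void)
    {
        uint32_t dec = 0x00100000; /* ~1M ticks remaining */

        if (dec & 0x80000000u)
            printf("decrementer already pending: inject now\n");
        else
            printf("arm host timer for %llu ns\n",
                   (unsigned long long)dec_to_ns(dec));
        return 0;
    }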
book3s_hv.c
85 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
86 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
115 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) in kvmppc_fast_vcpu_kick_hv() argument
117 int cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
120 wqp = kvm_arch_vcpu_wq(vcpu); in kvmppc_fast_vcpu_kick_hv()
123 ++vcpu->stat.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
126 if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid)) in kvmppc_fast_vcpu_kick_hv()
167 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_hv() argument
169 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
178 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) { in kvmppc_core_vcpu_load_hv()
[all …]
book3s_pr_papr.c
26 static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) in get_pteg_addr() argument
28 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in get_pteg_addr()
39 static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) in kvmppc_h_pr_enter() argument
41 long flags = kvmppc_get_gpr(vcpu, 4); in kvmppc_h_pr_enter()
42 long pte_index = kvmppc_get_gpr(vcpu, 5); in kvmppc_h_pr_enter()
50 pteg_addr = get_pteg_addr(vcpu, pte_index); in kvmppc_h_pr_enter()
52 mutex_lock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_h_pr_enter()
71 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); in kvmppc_h_pr_enter()
72 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); in kvmppc_h_pr_enter()
75 kvmppc_set_gpr(vcpu, 4, pte_index | i); in kvmppc_h_pr_enter()
[all …]
e500mc.c
31 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) in kvmppc_set_pending_interrupt() argument
52 tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id; in kvmppc_set_pending_interrupt()
77 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); in kvmppc_e500_tlbil_one()
98 mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu)); in kvmppc_e500_tlbil_all()
104 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) in kvmppc_set_pid() argument
106 vcpu->arch.pid = pid; in kvmppc_set_pid()
109 void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) in kvmppc_mmu_msr_notify() argument
116 static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_e500mc() argument
118 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_core_vcpu_load_e500mc()
120 kvmppc_booke_vcpu_load(vcpu, cpu); in kvmppc_core_vcpu_load_e500mc()
[all …]
timing.c
33 void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) in kvmppc_init_timing_stats() argument
38 mutex_lock(&vcpu->arch.exit_timing_lock); in kvmppc_init_timing_stats()
40 vcpu->arch.last_exit_type = 0xDEAD; in kvmppc_init_timing_stats()
42 vcpu->arch.timing_count_type[i] = 0; in kvmppc_init_timing_stats()
43 vcpu->arch.timing_max_duration[i] = 0; in kvmppc_init_timing_stats()
44 vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; in kvmppc_init_timing_stats()
45 vcpu->arch.timing_sum_duration[i] = 0; in kvmppc_init_timing_stats()
46 vcpu->arch.timing_sum_quad_duration[i] = 0; in kvmppc_init_timing_stats()
48 vcpu->arch.timing_last_exit = 0; in kvmppc_init_timing_stats()
49 vcpu->arch.timing_exit.tv64 = 0; in kvmppc_init_timing_stats()
[all …]
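
kvmppc_init_timing_stats() seeds each exit type with min = 0xFFFFFFFF, max = 0, and zeroed sum and sum-of-squares accumulators; keeping n, Σx, and Σx² is enough for the debugfs side to derive mean and variance without storing samples. A standalone sketch of that accumulation (field names follow the excerpt; the recording helper is a stand-in):

    #include <stdio.h>
    #include <stdint.h>

    struct exit_timing {
        uint64_t count, sum, sum_quad; /* n, Σx, Σx² */
        uint32_t min, max;
    };

    static void timing_init(struct exit_timing *t)
    {
        *t = (struct exit_timing){ .min = 0xFFFFFFFFu };
    }

    static void timing_record(struct exit_timing *t, uint32_t dur)
    {
        t->count++;
        t->sum += dur;
        t->sum_quad += (uint64_t)dur * dur;
        if (dur < t->min) t->min = dur;
        if (dur > t->max) t->max = dur;
    }

    int main(void)
    {
        uint32_t samples[] = { 120, 80, 200 };
        struct exit_timing t;

        timing_init(&t);
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            timing_record(&t, samples[i]);

        /* mean = Σx/n; variance = Σx²/n - mean² */
        double mean = (double)t.sum / t.count;
        double var  = (double)t.sum_quad / t.count - mean * mean;
        printf("min=%u max=%u mean=%.1f var=%.1f\n", t.min, t.max, mean, var);
        return 0;
    }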
e500_emulate.c
52 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) in kvmppc_e500_emul_msgclr() argument
54 ulong param = vcpu->arch.gpr[rb]; in kvmppc_e500_emul_msgclr()
60 clear_bit(prio, &vcpu->arch.pending_exceptions); in kvmppc_e500_emul_msgclr()
64 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) in kvmppc_e500_emul_msgsnd() argument
66 ulong param = vcpu->arch.gpr[rb]; in kvmppc_e500_emul_msgsnd()
75 kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) { in kvmppc_e500_emul_msgsnd()
87 static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_e500_emul_ehpriv() argument
95 run->debug.arch.address = vcpu->arch.pc; in kvmppc_e500_emul_ehpriv()
97 kvmppc_account_exit(vcpu, DEBUG_EXITS); in kvmppc_e500_emul_ehpriv()
107 static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu) in kvmppc_e500_emul_dcbtls() argument
[all …]
timing.h
27 void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
28 void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
29 void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
30 void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
32 static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) in kvmppc_set_exit_type() argument
34 vcpu->arch.last_exit_type = type; in kvmppc_set_exit_type()
39 static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_init_timing_stats() argument
40 static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_update_timing_stats() argument
41 static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, in kvmppc_create_vcpu_debugfs() argument
43 static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {} in kvmppc_remove_vcpu_debugfs() argument
[all …]
powerpc.c
55 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
69 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) in kvmppc_prepare_to_enter() argument
85 kvmppc_account_exit(vcpu, SIGNAL_EXITS); in kvmppc_prepare_to_enter()
86 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
91 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
101 if (vcpu->requests) { in kvmppc_prepare_to_enter()
104 trace_kvm_check_requests(vcpu); in kvmppc_prepare_to_enter()
105 r = kvmppc_core_check_requests(vcpu); in kvmppc_prepare_to_enter()
112 if (kvmppc_core_prepare_to_enter(vcpu)) { in kvmppc_prepare_to_enter()
129 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) in kvmppc_swab_shared() argument
[all …]
e500_mmu.c
67 static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel) in get_tlb_esel() argument
69 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in get_tlb_esel()
70 int esel = get_tlb_esel_bit(vcpu); in get_tlb_esel()
74 esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2); in get_tlb_esel()
129 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, in kvmppc_e500_deliver_tlb_miss() argument
132 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_e500_deliver_tlb_miss()
137 tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; in kvmppc_e500_deliver_tlb_miss()
139 tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; in kvmppc_e500_deliver_tlb_miss()
141 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) in kvmppc_e500_deliver_tlb_miss()
143 vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) in kvmppc_e500_deliver_tlb_miss()
[all …]
e500.h
61 struct kvm_vcpu vcpu; member
103 static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) in to_e500() argument
105 return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); in to_e500()
130 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
131 int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
132 int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
133 int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
134 int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
138 void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
139 int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
[all …]
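
to_e500() is the kernel's container_of idiom: the generic struct kvm_vcpu is embedded as the vcpu member of the arch-specific struct, so a pointer to the member can be walked back to the enclosing object. A self-contained demonstration (offsetof-based, like the kernel macro; struct layout simplified):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kvm_vcpu { int id; };

    struct vcpu_e500 {
        long shadow_pid;
        struct kvm_vcpu vcpu; /* generic part embedded in the arch part */
    };

    static struct vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
    {
        return container_of(vcpu, struct vcpu_e500, vcpu);
    }

    int main(void)
    {
        struct vcpu_e500 e = { .shadow_pid = 42, .vcpu = { .id = 7 } };
        struct kvm_vcpu *inner = &e.vcpu;

        /* Recover the outer e500 struct from the embedded generic vcpu. */
        printf("shadow_pid=%ld\n", to_e500(inner)->shadow_pid);
        return 0;
    }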
book3s_64_mmu.c
39 static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_reset_msr() argument
41 kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); in kvmppc_mmu_book3s_64_reset_msr()
45 struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_64_find_slbe() argument
52 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
55 if (!vcpu->arch.slb[i].valid) in kvmppc_mmu_book3s_64_find_slbe()
58 if (vcpu->arch.slb[i].tb) in kvmppc_mmu_book3s_64_find_slbe()
61 if (vcpu->arch.slb[i].esid == cmp_esid) in kvmppc_mmu_book3s_64_find_slbe()
62 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_64_find_slbe()
67 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_64_find_slbe()
68 if (vcpu->arch.slb[i].vsid) in kvmppc_mmu_book3s_64_find_slbe()
[all …]
booke.h
76 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
77 void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
79 void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
80 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
81 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
82 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
84 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
87 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
90 void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
[all …]
book3s_32_mmu.c
52 static inline bool check_debug_ip(struct kvm_vcpu *vcpu) in check_debug_ip() argument
55 return vcpu->arch.pc == DEBUG_MMU_PTE_IP; in check_debug_ip()
81 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
84 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
87 static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) in find_sr() argument
89 return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); in find_sr()
92 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_32_ea_to_vp() argument
98 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false)) in kvmppc_mmu_book3s_32_ea_to_vp()
101 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_book3s_32_ea_to_vp()
105 static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_32_reset_msr() argument
[all …]
book3s_64_mmu_host.c
34 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_invalidate_pte() argument
43 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
56 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
61 if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
64 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
65 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
71 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
81 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
97 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_map_page()
107 pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
[all …]
book3s_mmu_hpte.c
67 void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_hpte_cache_map() argument
70 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_hpte_cache_map()
112 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in invalidate_pte() argument
114 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in invalidate_pte()
119 kvmppc_mmu_invalidate_pte(vcpu, pte); in invalidate_pte()
143 static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) in kvmppc_mmu_pte_flush_all() argument
145 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_pte_flush_all()
155 invalidate_pte(vcpu, pte); in kvmppc_mmu_pte_flush_all()
161 static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) in kvmppc_mmu_pte_flush_page() argument
163 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvmppc_mmu_pte_flush_page()
[all …]
e500.c
143 vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, in kvmppc_e500_recalc_shadow_pid()
144 get_cur_as(&vcpu_e500->vcpu), in kvmppc_e500_recalc_shadow_pid()
145 get_cur_pid(&vcpu_e500->vcpu), in kvmppc_e500_recalc_shadow_pid()
146 get_cur_pr(&vcpu_e500->vcpu), 1); in kvmppc_e500_recalc_shadow_pid()
147 vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, in kvmppc_e500_recalc_shadow_pid()
148 get_cur_as(&vcpu_e500->vcpu), 0, in kvmppc_e500_recalc_shadow_pid()
149 get_cur_pr(&vcpu_e500->vcpu), 1); in kvmppc_e500_recalc_shadow_pid()
218 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, in kvmppc_e500_get_tlb_stid() argument
221 return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), in kvmppc_e500_get_tlb_stid()
222 get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); in kvmppc_e500_get_tlb_stid()
[all …]
book3s_32_mmu_host.c
60 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) in kvmppc_mmu_invalidate_pte() argument
77 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) in kvmppc_sid_hash() argument
90 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) in find_sid_vsid() argument
95 if (kvmppc_get_msr(vcpu) & MSR_PR) in find_sid_vsid()
98 sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); in find_sid_vsid()
99 map = &to_book3s(vcpu)->sid_map[sid_map_mask]; in find_sid_vsid()
106 map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; in find_sid_vsid()
117 static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, in kvmppc_mmu_get_pteg() argument
141 int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, in kvmppc_mmu_map_page() argument
159 hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable); in kvmppc_mmu_map_page()
[all …]
trace_hv.h
223 TP_PROTO(struct kvm_vcpu *vcpu),
224 TP_ARGS(vcpu),
234 __entry->vcpu_id = vcpu->vcpu_id;
235 __entry->pc = kvmppc_get_pc(vcpu);
236 __entry->ceded = vcpu->arch.ceded;
237 __entry->pending_exceptions = vcpu->arch.pending_exceptions;
247 TP_PROTO(struct kvm_vcpu *vcpu),
248 TP_ARGS(vcpu),
259 __entry->vcpu_id = vcpu->vcpu_id;
260 __entry->trap = vcpu->arch.trap;
[all …]
book3s_xics.c
306 kvmppc_book3s_queue_irqprio(icp->vcpu, in icp_try_update()
309 kvmppc_fast_vcpu_kick(icp->vcpu); in icp_try_update()
570 static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu) in kvmppc_h_xirr() argument
573 struct kvmppc_icp *icp = vcpu->arch.icp; in kvmppc_h_xirr()
577 kvmppc_book3s_dequeue_irqprio(icp->vcpu, in kvmppc_h_xirr()
599 XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr); in kvmppc_h_xirr()
604 static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, in kvmppc_h_ipi() argument
608 struct kvmppc_xics *xics = vcpu->kvm->arch.xics; in kvmppc_h_ipi()
615 vcpu->vcpu_id, server, mfrr); in kvmppc_h_ipi()
617 icp = vcpu->arch.icp; in kvmppc_h_ipi()
[all …]
book3s_interrupts.S
41 #define VCPU_LOAD_NVGPRS(vcpu) \ argument
42 PPC_LL r14, VCPU_GPR(R14)(vcpu); \
43 PPC_LL r15, VCPU_GPR(R15)(vcpu); \
44 PPC_LL r16, VCPU_GPR(R16)(vcpu); \
45 PPC_LL r17, VCPU_GPR(R17)(vcpu); \
46 PPC_LL r18, VCPU_GPR(R18)(vcpu); \
47 PPC_LL r19, VCPU_GPR(R19)(vcpu); \
48 PPC_LL r20, VCPU_GPR(R20)(vcpu); \
49 PPC_LL r21, VCPU_GPR(R21)(vcpu); \
50 PPC_LL r22, VCPU_GPR(R22)(vcpu); \
[all …]
book3s_hv_rm_xics.c
53 static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu, in icp_rm_set_vcpu_irq() argument
60 vcpu->stat.queue_intr++; in icp_rm_set_vcpu_irq()
61 set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); in icp_rm_set_vcpu_irq()
64 if (vcpu == this_vcpu) { in icp_rm_set_vcpu_irq()
70 cpu = vcpu->cpu; in icp_rm_set_vcpu_irq()
73 this_icp->rm_kick_target = vcpu; in icp_rm_set_vcpu_irq()
77 cpu += vcpu->arch.ptid; in icp_rm_set_vcpu_irq()
83 static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu) in icp_rm_clr_vcpu_irq() argument
87 &vcpu->arch.pending_exceptions); in icp_rm_clr_vcpu_irq()
122 icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu); in icp_rm_try_update()
[all …]
book3s_hv_ras.c
34 static void reload_slb(struct kvm_vcpu *vcpu) in reload_slb() argument
43 slb = vcpu->arch.slb_shadow.pinned_addr; in reload_slb()
49 if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end) in reload_slb()
68 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) in kvmppc_realmode_mc_power7() argument
70 unsigned long srr1 = vcpu->arch.shregs.msr; in kvmppc_realmode_mc_power7()
76 unsigned long dsisr = vcpu->arch.shregs.dsisr; in kvmppc_realmode_mc_power7()
81 reload_slb(vcpu); in kvmppc_realmode_mc_power7()
101 reload_slb(vcpu); in kvmppc_realmode_mc_power7()
139 long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) in kvmppc_realmode_machine_check() argument
141 return kvmppc_realmode_mc_power7(vcpu); in kvmppc_realmode_machine_check()
book3s_rtas.c
21 static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_set_xive() argument
35 rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); in kvm_rtas_set_xive()
42 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_get_xive() argument
55 rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); in kvm_rtas_get_xive()
67 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_int_off() argument
79 rc = kvmppc_xics_int_off(vcpu->kvm, irq); in kvm_rtas_int_off()
86 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) in kvm_rtas_int_on() argument
98 rc = kvmppc_xics_int_on(vcpu->kvm, irq); in kvm_rtas_int_on()
107 void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
208 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) in kvmppc_rtas_hcall() argument
[all …]
trace_booke.h
40 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
41 TP_ARGS(exit_nr, vcpu),
53 __entry->pc = kvmppc_get_pc(vcpu);
54 __entry->dar = kvmppc_get_fault_dar(vcpu);
55 __entry->msr = vcpu->arch.shared->msr;
56 __entry->last_inst = vcpu->arch.last_inst;
196 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
197 TP_ARGS(vcpu, priority),
206 __entry->cpu_nr = vcpu->vcpu_id;
208 __entry->pending = vcpu->arch.pending_exceptions;
trace_pr.h
14 TP_PROTO(int r, struct kvm_vcpu *vcpu),
15 TP_ARGS(r, vcpu),
24 __entry->pc = kvmppc_get_pc(vcpu);
123 TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
125 TP_ARGS(type, vcpu, p1, p2),
135 __entry->count = to_book3s(vcpu)->hpte_cache_count;
219 TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
220 TP_ARGS(exit_nr, vcpu),
233 __entry->pc = kvmppc_get_pc(vcpu);
234 __entry->dar = kvmppc_get_fault_dar(vcpu);
[all …]
e500_mmu_host.c
133 __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
138 vcpu_e500->vcpu.kvm->arch.lpid); in write_host_tlbe()
151 stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); in write_stlbe()
160 void kvmppc_map_magic(struct kvm_vcpu *vcpu) in kvmppc_map_magic() argument
162 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_map_magic()
164 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; in kvmppc_map_magic()
176 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; in kvmppc_map_magic()
297 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) in kvmppc_core_flush_tlb() argument
299 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); in kvmppc_core_flush_tlb()
307 struct kvm_vcpu *vcpu, in kvmppc_e500_setup_stlbe() argument
[all …]
book3s_xics.h
63 struct kvm_vcpu *vcpu; member
116 struct kvm_vcpu *vcpu = NULL; in kvmppc_xics_find_server() local
119 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmppc_xics_find_server()
120 if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num) in kvmppc_xics_find_server()
121 return vcpu->arch.icp; in kvmppc_xics_find_server()
book3s_64_vio_hv.c
44 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, in kvmppc_h_put_tce() argument
47 struct kvm *kvm = vcpu->kvm; in kvmppc_h_put_tce()
79 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, in kvmppc_h_get_tce() argument
82 struct kvm *kvm = vcpu->kvm; in kvmppc_h_get_tce()
97 vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; in kvmppc_h_get_tce()
book3s_64_mmu_hv.c
174 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
185 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma()
245 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) in kvmppc_mmu_book3s_64_hv_reset_msr() argument
247 unsigned long msr = vcpu->arch.intr_msr; in kvmppc_mmu_book3s_64_hv_reset_msr()
250 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) in kvmppc_mmu_book3s_64_hv_reset_msr()
253 msr |= vcpu->arch.shregs.msr & MSR_TS_MASK; in kvmppc_mmu_book3s_64_hv_reset_msr()
254 kvmppc_set_msr(vcpu, msr); in kvmppc_mmu_book3s_64_hv_reset_msr()
277 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, in kvmppc_mmu_book3s_hv_find_slbe() argument
283 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
284 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
[all …]
book3s_hv_rm_mmu.c
344 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_enter() argument
347 return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, in kvmppc_h_enter()
348 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); in kvmppc_h_enter()
449 long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_remove() argument
452 return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, in kvmppc_h_remove()
453 &vcpu->arch.gpr[4]); in kvmppc_h_remove()
456 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) in kvmppc_h_bulk_remove() argument
458 struct kvm *kvm = vcpu->kvm; in kvmppc_h_bulk_remove()
459 unsigned long *args = &vcpu->arch.gpr[4]; in kvmppc_h_bulk_remove()
566 long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, in kvmppc_h_protect() argument
[all …]
book3s.h
25 extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
26 extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
28 extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
30 extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
book3s_hv_builtin.c
110 long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, in kvmppc_rm_h_confer() argument
113 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_rm_h_confer()
120 set_bit(vcpu->arch.ptid, &vc->conferring_threads); in kvmppc_rm_h_confer()
130 clear_bit(vcpu->arch.ptid, &vc->conferring_threads); in kvmppc_rm_h_confer()
183 long kvmppc_h_random(struct kvm_vcpu *vcpu) in kvmppc_h_random() argument
185 if (powernv_get_random_real_mode(&vcpu->arch.gpr[4])) in kvmppc_h_random()
mpic.c
117 struct kvm_vcpu *vcpu = current->thread.kvm_vcpu; in get_current_cpu() local
118 return vcpu ? vcpu->arch.irq_cpu_id : -1; in get_current_cpu()
179 struct kvm_vcpu *vcpu; member
249 if (!dst->vcpu) { in mpic_irq_raise()
255 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_raise()
261 kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq); in mpic_irq_raise()
267 if (!dst->vcpu) { in mpic_irq_lower()
273 pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id, in mpic_irq_lower()
279 kvmppc_core_dequeue_external(dst->vcpu); in mpic_irq_lower()
1177 void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu) in kvmppc_mpic_set_epr() argument
[all …]
trace.h
102 TP_PROTO(struct kvm_vcpu *vcpu),
103 TP_ARGS(vcpu),
111 __entry->cpu_nr = vcpu->vcpu_id;
112 __entry->requests = vcpu->requests;
Kconfig
117 Calculate time taken for each vcpu in the real-mode guest entry,
122 kvm/vm#/vcpu#/timings. The overhead is of the order of 30 - 40
134 Calculate elapsed time for every exit/enter cycle. A per-vcpu
/linux-4.1.27/arch/s390/kvm/
priv.c
34 static int handle_set_clock(struct kvm_vcpu *vcpu) in handle_set_clock() argument
42 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) in handle_set_clock()
43 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); in handle_set_clock()
45 op2 = kvm_s390_get_base_disp_s(vcpu, &ar); in handle_set_clock()
47 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in handle_set_clock()
48 rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); in handle_set_clock()
50 return kvm_s390_inject_prog_cond(vcpu, rc); in handle_set_clock()
53 kvm_s390_set_psw_cc(vcpu, 3); in handle_set_clock()
58 mutex_lock(&vcpu->kvm->lock); in handle_set_clock()
59 kvm_for_each_vcpu(i, cpup, vcpu->kvm) in handle_set_clock()
[all …]
diag.c
23 static int diag_release_pages(struct kvm_vcpu *vcpu) in diag_release_pages() argument
26 unsigned long prefix = kvm_s390_get_prefix(vcpu); in diag_release_pages()
28 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
29 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096; in diag_release_pages()
33 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in diag_release_pages()
35 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); in diag_release_pages()
36 vcpu->stat.diagnose_10++; in diag_release_pages()
43 gmap_discard(vcpu->arch.gmap, start, end); in diag_release_pages()
51 gmap_discard(vcpu->arch.gmap, start, prefix); in diag_release_pages()
53 gmap_discard(vcpu->arch.gmap, 0, 4096); in diag_release_pages()
[all …]
intercept.c
41 void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc) in kvm_s390_rewind_psw() argument
43 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; in kvm_s390_rewind_psw()
54 static int handle_noop(struct kvm_vcpu *vcpu) in handle_noop() argument
56 switch (vcpu->arch.sie_block->icptcode) { in handle_noop()
58 vcpu->stat.exit_null++; in handle_noop()
61 vcpu->stat.exit_external_request++; in handle_noop()
69 static int handle_stop(struct kvm_vcpu *vcpu) in handle_stop() argument
71 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; in handle_stop()
75 vcpu->stat.exit_stop_request++; in handle_stop()
78 if (kvm_s390_vcpu_has_irq(vcpu, 1)) in handle_stop()
[all …]
kvm-s390.h
23 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
26 #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) argument
28 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) argument
44 static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) in is_vcpu_stopped() argument
46 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; in is_vcpu_stopped()
61 static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu) in kvm_s390_get_prefix() argument
63 return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT; in kvm_s390_get_prefix()
66 static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) in kvm_s390_set_prefix() argument
68 vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT; in kvm_s390_set_prefix()
69 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_s390_set_prefix()
[all …]
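
The prefix helpers above show that the SIE block stores the guest prefix pre-shifted, so the accessors shift by GUEST_PREFIX_SHIFT in both directions, and every update requests a guest TLB flush. A minimal model of the round trip (the shift of 13 fits an 8K-aligned prefix, but treat the constant as illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define GUEST_PREFIX_SHIFT 13 /* prefix is 8K-aligned */

    struct sie_block { uint32_t prefix; /* stored pre-shifted */ };

    static uint32_t get_prefix(const struct sie_block *sie)
    {
        return sie->prefix << GUEST_PREFIX_SHIFT;
    }

    static void set_prefix(struct sie_block *sie, uint32_t prefix)
    {
        sie->prefix = prefix >> GUEST_PREFIX_SHIFT;
        /* the real helper also requests a guest TLB flush here */
    }

    int main(void)
    {
        struct sie_block sie;
        set_prefix(&sie, 0x20000); /* 128K, 8K-aligned */
        printf("prefix=%#x\n", get_prefix(&sie));
        return 0;
    }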
kvm-s390.c
384 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
388 struct kvm_vcpu *vcpu; in kvm_s390_vm_set_crypto() local
423 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vm_set_crypto()
424 kvm_s390_vcpu_crypto_setup(vcpu); in kvm_s390_vm_set_crypto()
425 exit_sie(vcpu); in kvm_s390_vm_set_crypto()
1116 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
1118 VCPU_EVENT(vcpu, 3, "%s", "free cpu"); in kvm_arch_vcpu_destroy()
1119 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
1120 kvm_s390_clear_local_irqs(vcpu); in kvm_arch_vcpu_destroy()
1121 kvm_clear_async_pf_completion_queue(vcpu); in kvm_arch_vcpu_destroy()
[all …]
sigp.c
23 static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, in __sigp_sense() argument
46 VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id, in __sigp_sense()
51 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, in __inject_sigp_emergency() argument
56 .u.emerg.code = vcpu->vcpu_id, in __inject_sigp_emergency()
62 VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", in __inject_sigp_emergency()
68 static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) in __sigp_emergency() argument
70 return __inject_sigp_emergency(vcpu, dst_vcpu); in __sigp_emergency()
73 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, in __sigp_conditional_emergency() argument
92 return __inject_sigp_emergency(vcpu, dst_vcpu); in __sigp_conditional_emergency()
100 static int __sigp_external_call(struct kvm_vcpu *vcpu, in __sigp_external_call() argument
[all …]
guestdbg.c
62 static void enable_all_hw_bp(struct kvm_vcpu *vcpu) in enable_all_hw_bp() argument
65 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_bp()
66 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_bp()
67 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_bp()
70 if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || in enable_all_hw_bp()
71 vcpu->arch.guestdbg.hw_bp_info == NULL) in enable_all_hw_bp()
82 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { in enable_all_hw_bp()
83 start = vcpu->arch.guestdbg.hw_bp_info[i].addr; in enable_all_hw_bp()
84 len = vcpu->arch.guestdbg.hw_bp_info[i].len; in enable_all_hw_bp()
102 static void enable_all_hw_wp(struct kvm_vcpu *vcpu) in enable_all_hw_wp() argument
[all …]
interrupt.c
38 int psw_extint_disabled(struct kvm_vcpu *vcpu) in psw_extint_disabled() argument
40 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); in psw_extint_disabled()
43 static int psw_ioint_disabled(struct kvm_vcpu *vcpu) in psw_ioint_disabled() argument
45 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); in psw_ioint_disabled()
48 static int psw_mchk_disabled(struct kvm_vcpu *vcpu) in psw_mchk_disabled() argument
50 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); in psw_mchk_disabled()
53 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) in psw_interrupts_disabled() argument
55 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || in psw_interrupts_disabled()
56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || in psw_interrupts_disabled()
57 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) in psw_interrupts_disabled()
[all …]
gaccess.h
30 static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, in kvm_s390_real_to_abs() argument
33 unsigned long prefix = kvm_s390_get_prefix(vcpu); in kvm_s390_real_to_abs()
55 static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, in kvm_s390_logical_to_effective() argument
58 psw_t *psw = &vcpu->arch.sie_block->gpsw; in kvm_s390_logical_to_effective()
95 #define put_guest_lc(vcpu, x, gra) \ argument
97 struct kvm_vcpu *__vcpu = (vcpu); \
124 int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, in write_guest_lc() argument
127 unsigned long gpa = gra + kvm_s390_get_prefix(vcpu); in write_guest_lc()
129 return kvm_write_guest(vcpu->kvm, gpa, data, len); in write_guest_lc()
150 int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, in read_guest_lc() argument
[all …]
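
kvm_s390_real_to_abs() implements z/Architecture prefixing: the first 8K of a CPU's real address space and the 8K block starting at the prefix register trade places, while every other address passes through unchanged. A standalone version of the translation (4K pages assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Real-to-absolute translation with an 8K prefix swap. */
    static unsigned long real_to_abs(unsigned long gra, unsigned long prefix)
    {
        if (gra < 2 * PAGE_SIZE)
            return gra + prefix; /* low 8K -> prefix area */
        if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
            return gra - prefix; /* prefix area -> low 8K */
        return gra;              /* everything else: identity */
    }

    int main(void)
    {
        unsigned long prefix = 0x10000; /* example prefix register value */
        printf("%#lx %#lx %#lx\n",
               real_to_abs(0x0,     prefix),  /* 0x10000 */
               real_to_abs(0x10000, prefix),  /* 0x0     */
               real_to_abs(0x50000, prefix)); /* 0x50000 */
        return 0;
    }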
gaccess.c
260 int ipte_lock_held(struct kvm_vcpu *vcpu) in ipte_lock_held() argument
262 union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control; in ipte_lock_held()
264 if (vcpu->arch.sie_block->eca & 1) in ipte_lock_held()
266 return vcpu->kvm->arch.ipte_lock_count != 0; in ipte_lock_held()
269 static void ipte_lock_simple(struct kvm_vcpu *vcpu) in ipte_lock_simple() argument
273 mutex_lock(&vcpu->kvm->arch.ipte_mutex); in ipte_lock_simple()
274 vcpu->kvm->arch.ipte_lock_count++; in ipte_lock_simple()
275 if (vcpu->kvm->arch.ipte_lock_count > 1) in ipte_lock_simple()
277 ic = &vcpu->kvm->arch.sca->ipte_control; in ipte_lock_simple()
288 mutex_unlock(&vcpu->kvm->arch.ipte_mutex); in ipte_lock_simple()
[all …]
trace-s390.h
42 TP_PROTO(unsigned int id, struct kvm_vcpu *vcpu,
44 TP_ARGS(id, vcpu, sie_block),
48 __field(struct kvm_vcpu *, vcpu)
54 __entry->vcpu = vcpu;
59 __entry->vcpu, __entry->sie_block)
trace.h
19 #define VCPU_PROTO_COMMON struct kvm_vcpu *vcpu
20 #define VCPU_ARGS_COMMON vcpu
25 __entry->id = vcpu->vcpu_id; \
26 __entry->pswmask = vcpu->arch.sie_block->gpsw.mask; \
27 __entry->pswaddr = vcpu->arch.sie_block->gpsw.addr; \
/linux-4.1.27/arch/arm/include/asm/
kvm_emulate.h
28 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
29 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
31 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
32 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
33 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
34 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
35 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
37 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) in vcpu_reset_hcr() argument
39 vcpu->arch.hcr = HCR_GUEST_MASK; in vcpu_reset_hcr()
42 static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) in vcpu_get_hcr() argument
[all …]
kvm_coproc.h
22 void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
31 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
32 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
33 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
34 int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
35 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
36 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
38 unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
39 int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
43 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
[all …]
kvm_host.h
47 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
49 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
50 void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
155 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
156 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
157 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
158 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
168 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
169 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
182 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
[all …]
kvm_mmu.h
62 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
64 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
180 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) in vcpu_has_cache_enabled() argument
182 return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; in vcpu_has_cache_enabled()
185 static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in __coherent_cache_guest_page() argument
207 bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; in __coherent_cache_guest_page()
269 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
270 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
kvm_psci.h
24 int kvm_psci_version(struct kvm_vcpu *vcpu);
25 int kvm_psci_call(struct kvm_vcpu *vcpu);
kvm_mmio.h
31 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
32 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
kvm_asm.h
101 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
/linux-4.1.27/arch/x86/kvm/
x86.c
89 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
90 static void process_nmi(struct kvm_vcpu *vcpu);
91 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
177 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
181 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
269 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
271 return vcpu->arch.apic_base; in kvm_get_apic_base()
275 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
277 u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
281 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | in kvm_set_apic_base()
[all …]
x86.h
7 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) in kvm_clear_exception_queue() argument
9 vcpu->arch.exception.pending = false; in kvm_clear_exception_queue()
12 static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, in kvm_queue_interrupt() argument
15 vcpu->arch.interrupt.pending = true; in kvm_queue_interrupt()
16 vcpu->arch.interrupt.soft = soft; in kvm_queue_interrupt()
17 vcpu->arch.interrupt.nr = vector; in kvm_queue_interrupt()
20 static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu) in kvm_clear_interrupt_queue() argument
22 vcpu->arch.interrupt.pending = false; in kvm_clear_interrupt_queue()
25 static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) in kvm_event_needs_reinjection() argument
27 return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || in kvm_event_needs_reinjection()
[all …]
kvm_cache_regs.h
9 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, in kvm_register_read() argument
12 if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) in kvm_register_read()
13 kvm_x86_ops->cache_reg(vcpu, reg); in kvm_register_read()
15 return vcpu->arch.regs[reg]; in kvm_register_read()
18 static inline void kvm_register_write(struct kvm_vcpu *vcpu, in kvm_register_write() argument
22 vcpu->arch.regs[reg] = val; in kvm_register_write()
23 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); in kvm_register_write()
24 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in kvm_register_write()
27 static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) in kvm_rip_read() argument
29 return kvm_register_read(vcpu, VCPU_REGS_RIP); in kvm_rip_read()
[all …]
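
kvm_register_read()/kvm_register_write() form a lazy cache over the hardware register file: a read consults the regs_avail bitmap and only calls the vendor cache_reg backend on a miss, while a write fills the cache and marks the register both available and dirty for a later writeback. A simplified standalone model (the fake hardware read stands in for kvm_x86_ops->cache_reg):

    #include <stdio.h>

    enum { REG_RIP, REG_RSP, NR_REGS };

    struct vcpu {
        unsigned long regs[NR_REGS];
        unsigned long avail; /* bit set: cached copy is valid        */
        unsigned long dirty; /* bit set: cache is newer than hardware */
    };

    /* Stand-in for the vendor callback that reads real hardware state. */
    static void cache_reg_from_hw(struct vcpu *v, int reg)
    {
        v->regs[reg] = 0x1000 + reg; /* pretend hardware value */
        v->avail |= 1UL << reg;
    }

    static unsigned long register_read(struct vcpu *v, int reg)
    {
        if (!(v->avail & (1UL << reg)))
            cache_reg_from_hw(v, reg); /* miss: fetch once */
        return v->regs[reg];
    }

    static void register_write(struct vcpu *v, int reg, unsigned long val)
    {
        v->regs[reg] = val;
        v->avail |= 1UL << reg;
        v->dirty |= 1UL << reg; /* needs writeback before guest entry */
    }

    int main(void)
    {
        struct vcpu v = { {0}, 0, 0 };
        printf("rip=%#lx\n", register_read(&v, REG_RIP));
        register_write(&v, REG_RSP, 0x7000);
        printf("rsp=%#lx dirty=%#lx\n", register_read(&v, REG_RSP), v.dirty);
        return 0;
    }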
lapic.h
26 struct kvm_vcpu *vcpu; member
44 int kvm_create_lapic(struct kvm_vcpu *vcpu);
45 void kvm_free_lapic(struct kvm_vcpu *vcpu);
47 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
48 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
49 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
50 void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
51 void kvm_lapic_reset(struct kvm_vcpu *vcpu);
52 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
53 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
[all …]
vmx.c
471 struct kvm_vcpu vcpu; member
561 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() argument
563 return container_of(vcpu, struct vcpu_vmx, vcpu); in to_vmx()
782 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu) in get_vmcs12() argument
784 return to_vmx(vcpu)->nested.current_vmcs12; in get_vmcs12()
787 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr) in nested_get_page() argument
789 struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT); in nested_get_page()
806 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
814 static void vmx_set_segment(struct kvm_vcpu *vcpu,
816 static void vmx_get_segment(struct kvm_vcpu *vcpu,
[all …]
cpuid.h
6 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
7 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
12 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
15 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21 void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
23 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
25 static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) in cpuid_maxphyaddr() argument
27 return vcpu->arch.maxphyaddr; in cpuid_maxphyaddr()
30 static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) in guest_cpuid_has_xsave() argument
[all …]
lapic.c
88 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) in kvm_apic_pending_eoi() argument
90 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi()
161 struct kvm_vcpu *vcpu; in recalculate_apic_map() local
171 kvm_for_each_vcpu(i, vcpu, kvm) { in recalculate_apic_map()
172 struct kvm_lapic *apic = vcpu->arch.apic; in recalculate_apic_map()
176 if (!kvm_apic_present(vcpu)) in recalculate_apic_map()
225 recalculate_apic_map(apic->vcpu->kvm); in apic_set_spiv()
234 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_id()
240 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_ldr()
273 void kvm_apic_set_version(struct kvm_vcpu *vcpu) in kvm_apic_set_version() argument
[all …]
mmu.c
179 static void mmu_free_roots(struct kvm_vcpu *vcpu);
299 static int is_nx(struct kvm_vcpu *vcpu) in is_nx() argument
301 return vcpu->arch.efer & EFER_NX; in is_nx()
632 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_begin() argument
639 vcpu->mode = READING_SHADOW_PAGE_TABLES; in walk_shadow_page_lockless_begin()
647 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) in walk_shadow_page_lockless_end() argument
655 vcpu->mode = OUTSIDE_GUEST_MODE; in walk_shadow_page_lockless_end()
709 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) in mmu_topup_memory_caches() argument
713 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, in mmu_topup_memory_caches()
717 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); in mmu_topup_memory_caches()
[all …]
svm.c
130 struct kvm_vcpu vcpu; member
204 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
249 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu) in to_svm() argument
251 return container_of(vcpu, struct vcpu_svm, vcpu); in to_svm()
261 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
276 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
378 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
383 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
388 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
473 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) in svm_set_efer() argument
[all …]
paging_tmpl.h
148 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
171 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, in FNAME()
175 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)) in FNAME()
188 drop_spte(vcpu->kvm, spte); in FNAME()
192 static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) in FNAME()
207 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, in FNAME()
255 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
259 mark_page_dirty(vcpu->kvm, table_gfn); in FNAME()
269 struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME()
289 pte = mmu->get_cr3(vcpu); in FNAME()
[all …]
mmu_audit.c
35 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
37 static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __mmu_spte_walk() argument
45 fn(vcpu, ent + i, level); in __mmu_spte_walk()
52 __mmu_spte_walk(vcpu, child, fn, level - 1); in __mmu_spte_walk()
57 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn) in mmu_spte_walk() argument
62 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in mmu_spte_walk()
65 if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { in mmu_spte_walk()
66 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_spte_walk()
69 __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL); in mmu_spte_walk()
74 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_spte_walk()
[all …]
cpuid.c
61 int kvm_update_cpuid(struct kvm_vcpu *vcpu) in kvm_update_cpuid() argument
64 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_update_cpuid()
66 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid()
73 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) in kvm_update_cpuid()
84 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid()
86 vcpu->arch.guest_supported_xcr0 = 0; in kvm_update_cpuid()
87 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_update_cpuid()
89 vcpu->arch.guest_supported_xcr0 = in kvm_update_cpuid()
92 vcpu->arch.guest_xstate_size = best->ebx = in kvm_update_cpuid()
93 xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid()
[all …]
pmu.c
50 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in pmc_bitmask()
57 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in pmc_enabled()
90 void kvm_deliver_pmi(struct kvm_vcpu *vcpu) in kvm_deliver_pmi() argument
92 if (vcpu->arch.apic) in kvm_deliver_pmi()
93 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC); in kvm_deliver_pmi()
100 struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu, in trigger_pmi() local
103 kvm_deliver_pmi(vcpu); in trigger_pmi()
111 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in kvm_perf_overflow()
114 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
122 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in kvm_perf_overflow_intr()
[all …]
mmu.h
52 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
71 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
72 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
73 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
84 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) in kvm_mmu_reload() argument
86 if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE)) in kvm_mmu_reload()
89 return kvm_mmu_load(vcpu); in kvm_mmu_reload()
135 static inline bool is_write_protection(struct kvm_vcpu *vcpu) in is_write_protection() argument
137 return kvm_read_cr0_bits(vcpu, X86_CR0_WP); in is_write_protection()
144 static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in permission_fault() argument
[all …]
irq.c
34 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
36 return apic_has_pending_timer(vcpu); in kvm_cpu_has_pending_timer()
118 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) in kvm_inject_pending_timer_irqs() argument
120 kvm_inject_apic_timer_irqs(vcpu); in kvm_inject_pending_timer_irqs()
125 void __kvm_migrate_timers(struct kvm_vcpu *vcpu) in __kvm_migrate_timers() argument
127 __kvm_migrate_apic_timer(vcpu); in __kvm_migrate_timers()
128 __kvm_migrate_pit_timer(vcpu); in __kvm_migrate_timers()
irq.h
97 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
98 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
99 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
100 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
101 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
102 void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
104 int apic_has_pending_timer(struct kvm_vcpu *vcpu);
ioapic.c
108 static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) in __rtc_irq_eoi_tracking_restore_one() argument
111 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; in __rtc_irq_eoi_tracking_restore_one()
115 if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id, in __rtc_irq_eoi_tracking_restore_one()
119 new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector); in __rtc_irq_eoi_tracking_restore_one()
120 old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one()
126 __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one()
129 __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map); in __rtc_irq_eoi_tracking_restore_one()
135 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu) in kvm_rtc_eoi_tracking_restore_one() argument
137 struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; in kvm_rtc_eoi_tracking_restore_one()
140 __rtc_irq_eoi_tracking_restore_one(vcpu); in kvm_rtc_eoi_tracking_restore_one()
[all …]
Dirq_comm.c60 struct kvm_vcpu *vcpu, *lowest = NULL; in kvm_irq_delivery_to_apic() local
71 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_irq_delivery_to_apic()
72 if (!kvm_apic_present(vcpu)) in kvm_irq_delivery_to_apic()
75 if (!kvm_apic_match_dest(vcpu, src, irq->shorthand, in kvm_irq_delivery_to_apic()
82 r += kvm_apic_set_irq(vcpu, irq, dest_map); in kvm_irq_delivery_to_apic()
83 } else if (kvm_lapic_enabled(vcpu)) { in kvm_irq_delivery_to_apic()
85 lowest = vcpu; in kvm_irq_delivery_to_apic()
86 else if (kvm_apic_compare_prio(vcpu, lowest) < 0) in kvm_irq_delivery_to_apic()
87 lowest = vcpu; in kvm_irq_delivery_to_apic()
Di8259.c52 struct kvm_vcpu *vcpu, *found = NULL; in pic_unlock() local
60 kvm_for_each_vcpu(i, vcpu, s->kvm) { in pic_unlock()
61 if (kvm_apic_accept_pic_intr(vcpu)) { in pic_unlock()
62 found = vcpu; in pic_unlock()
279 struct kvm_vcpu *vcpu; in kvm_pic_reset() local
295 kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) in kvm_pic_reset()
296 if (kvm_apic_accept_pic_intr(vcpu)) { in kvm_pic_reset()
532 static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_master_write() argument
539 static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_master_read() argument
546 static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, in picdev_slave_write() argument
[all …]
Dioapic.h108 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
109 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
112 void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
123 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
Dtrace.h204 TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
205 TP_ARGS(exit_reason, vcpu, isa),
217 __entry->guest_rip = kvm_rip_read(vcpu);
219 kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
452 __entry->apicid = apic->vcpu->vcpu_id;
469 __entry->apicid = apic->vcpu->vcpu_id;
709 TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
710 TP_ARGS(vcpu, failed),
722 __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
723 __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
[all …]
Di8254.c256 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) in __kvm_migrate_pit_timer() argument
258 struct kvm_pit *pit = vcpu->kvm->arch.vpit; in __kvm_migrate_pit_timer()
261 if (!kvm_vcpu_is_bsp(vcpu) || !pit) in __kvm_migrate_pit_timer()
281 struct kvm_vcpu *vcpu; in pit_do_work() local
311 kvm_for_each_vcpu(i, vcpu, kvm) in pit_do_work()
312 kvm_apic_nmi_wd_deliver(vcpu); in pit_do_work()
448 static int pit_ioport_write(struct kvm_vcpu *vcpu, in pit_ioport_write() argument
525 static int pit_ioport_read(struct kvm_vcpu *vcpu, in pit_ioport_read() argument
596 static int speaker_ioport_write(struct kvm_vcpu *vcpu, in speaker_ioport_write() argument
614 static int speaker_ioport_read(struct kvm_vcpu *vcpu, in speaker_ioport_read() argument
Dmmutrace.h249 TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
251 TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
264 __entry->vcpu_id = vcpu->vcpu_id;
/linux-4.1.27/arch/arm64/kvm/
Dinject_fault.c32 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) in prepare_fault32() argument
35 unsigned long new_spsr_value = *vcpu_cpsr(vcpu); in prepare_fault32()
38 u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); in prepare_fault32()
47 *vcpu_cpsr(vcpu) = cpsr; in prepare_fault32()
50 *vcpu_spsr(vcpu) = new_spsr_value; in prepare_fault32()
51 *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; in prepare_fault32()
57 vect_offset += vcpu_cp15(vcpu, c12_VBAR); in prepare_fault32()
59 *vcpu_pc(vcpu) = vect_offset; in prepare_fault32()
62 static void inject_undef32(struct kvm_vcpu *vcpu) in inject_undef32() argument
64 prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4); in inject_undef32()
[all …]
Dhandle_exit.c36 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_hvc() argument
40 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), in handle_hvc()
41 kvm_vcpu_hvc_get_imm(vcpu)); in handle_hvc()
43 ret = kvm_psci_call(vcpu); in handle_hvc()
45 kvm_inject_undefined(vcpu); in handle_hvc()
52 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_smc() argument
54 kvm_inject_undefined(vcpu); in handle_smc()
70 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_wfx() argument
72 if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) { in kvm_handle_wfx()
73 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true); in kvm_handle_wfx()
[all …]
Dguest.c39 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
49 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in get_core_reg() argument
58 struct kvm_regs *regs = vcpu_gp_regs(vcpu); in get_core_reg()
74 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_core_reg() argument
77 struct kvm_regs *regs = vcpu_gp_regs(vcpu); in set_core_reg()
122 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
127 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
154 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) in copy_timer_indices() argument
168 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_timer_reg() argument
178 return kvm_arm_timer_set_reg(vcpu, reg->id, val); in set_timer_reg()
[all …]
Demulate.c55 static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) in kvm_vcpu_get_condition() argument
57 u32 esr = kvm_vcpu_get_hsr(vcpu); in kvm_vcpu_get_condition()
68 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) in kvm_condition_valid32() argument
75 if (kvm_vcpu_get_hsr(vcpu) >> 30) in kvm_condition_valid32()
79 cond = kvm_vcpu_get_condition(vcpu); in kvm_condition_valid32()
83 cpsr = *vcpu_cpsr(vcpu); in kvm_condition_valid32()
117 static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) in kvm_adjust_itstate() argument
120 unsigned long cpsr = *vcpu_cpsr(vcpu); in kvm_adjust_itstate()
142 *vcpu_cpsr(vcpu) = cpsr; in kvm_adjust_itstate()
149 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr) in kvm_skip_instr32() argument
[all …]
Dsys_regs.c78 static bool access_dcsw(struct kvm_vcpu *vcpu, in access_dcsw() argument
83 return read_from_write_only(vcpu, p); in access_dcsw()
85 kvm_set_way_flush(vcpu); in access_dcsw()
94 static bool access_vm_reg(struct kvm_vcpu *vcpu, in access_vm_reg() argument
99 bool was_enabled = vcpu_has_cache_enabled(vcpu); in access_vm_reg()
103 val = *vcpu_reg(vcpu, p->Rt); in access_vm_reg()
105 vcpu_sys_reg(vcpu, r->reg) = val; in access_vm_reg()
108 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; in access_vm_reg()
109 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; in access_vm_reg()
112 kvm_toggle_cache(vcpu, was_enabled); in access_vm_reg()
[all …]
Dsys_regs.h67 static inline bool ignore_write(struct kvm_vcpu *vcpu, in ignore_write() argument
73 static inline bool read_zero(struct kvm_vcpu *vcpu, in read_zero() argument
76 *vcpu_reg(vcpu, p->Rt) = 0; in read_zero()
80 static inline bool write_to_read_only(struct kvm_vcpu *vcpu, in write_to_read_only() argument
84 *vcpu_pc(vcpu)); in write_to_read_only()
89 static inline bool read_from_write_only(struct kvm_vcpu *vcpu, in read_from_write_only() argument
93 *vcpu_pc(vcpu)); in read_from_write_only()
99 static inline void reset_unknown(struct kvm_vcpu *vcpu, in reset_unknown() argument
104 vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL; in reset_unknown()
107 static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) in reset_val() argument
[all …]
Dreset.c82 int kvm_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_reset_vcpu() argument
87 switch (vcpu->arch.target) { in kvm_reset_vcpu()
89 if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { in kvm_reset_vcpu()
102 memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset)); in kvm_reset_vcpu()
105 kvm_reset_sys_regs(vcpu); in kvm_reset_vcpu()
108 kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); in kvm_reset_vcpu()
Dsys_regs_generic_v8.c33 static bool access_actlr(struct kvm_vcpu *vcpu, in access_actlr() argument
38 return ignore_write(vcpu, p); in access_actlr()
40 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); in access_actlr()
44 static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) in reset_actlr() argument
49 vcpu_sys_reg(vcpu, ACTLR_EL1) = actlr; in reset_actlr()
Dregmap.c112 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) in vcpu_reg32() argument
114 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; in vcpu_reg32()
115 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; in vcpu_reg32()
144 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu) in vcpu_spsr32() argument
146 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; in vcpu_spsr32()
167 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode]; in vcpu_spsr32()
DKconfig50 If you choose a high number, the vcpu structures will be quite
/linux-4.1.27/arch/powerpc/include/asm/
Dkvm_ppc.h70 extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
71 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
74 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
75 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
78 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
81 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
85 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
88 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
90 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
93 struct kvm_vcpu *vcpu);
[all …]
Dkvm_book3s.h117 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
118 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
119 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
120 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
121 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
122 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
123 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
124 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
126 extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
127 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
[all …]
Dkvm_booke.h37 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) in kvmppc_set_gpr() argument
39 vcpu->arch.gpr[num] = val; in kvmppc_set_gpr()
42 static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) in kvmppc_get_gpr() argument
44 return vcpu->arch.gpr[num]; in kvmppc_get_gpr()
47 static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_cr() argument
49 vcpu->arch.cr = val; in kvmppc_set_cr()
52 static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) in kvmppc_get_cr() argument
54 return vcpu->arch.cr; in kvmppc_get_cr()
57 static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) in kvmppc_set_xer() argument
59 vcpu->arch.xer = val; in kvmppc_set_xer()
[all …]
Dkvm_host.h337 void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
338 u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
339 u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
340 void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
341 void (*slbia)(struct kvm_vcpu *vcpu);
343 void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
344 u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
345 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
347 void (*reset_msr)(struct kvm_vcpu *vcpu);
348 void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
[all …]
Dkvm_book3s_32.h23 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) in svcpu_get() argument
25 return vcpu->arch.shadow_vcpu; in svcpu_get()
Dfsl_hcalls.h535 unsigned int vcpu, unsigned int *state) in fh_get_core_state() argument
543 r4 = vcpu; in fh_get_core_state()
565 static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu) in fh_enter_nap() argument
573 r4 = vcpu; in fh_enter_nap()
590 static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu) in fh_exit_nap() argument
598 r4 = vcpu; in fh_exit_nap()
Dkvm_book3s_64.h24 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) in svcpu_get() argument
/linux-4.1.27/arch/arm/kvm/
Dpsci.c42 static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) in kvm_psci_vcpu_suspend() argument
57 kvm_vcpu_block(vcpu); in kvm_psci_vcpu_suspend()
62 static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) in kvm_psci_vcpu_off() argument
64 vcpu->arch.pause = true; in kvm_psci_vcpu_off()
70 struct kvm_vcpu *vcpu = NULL; in kvm_psci_vcpu_on() local
80 vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id); in kvm_psci_vcpu_on()
86 if (!vcpu) in kvm_psci_vcpu_on()
88 if (!vcpu->arch.pause) { in kvm_psci_vcpu_on()
98 kvm_reset_vcpu(vcpu); in kvm_psci_vcpu_on()
101 if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { in kvm_psci_vcpu_on()
[all …]
Demulate.c113 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) in vcpu_reg() argument
115 unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; in vcpu_reg()
116 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; in vcpu_reg()
145 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) in vcpu_spsr() argument
147 unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; in vcpu_spsr()
150 return &vcpu->arch.regs.KVM_ARM_SVC_spsr; in vcpu_spsr()
152 return &vcpu->arch.regs.KVM_ARM_ABT_spsr; in vcpu_spsr()
154 return &vcpu->arch.regs.KVM_ARM_UND_spsr; in vcpu_spsr()
156 return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; in vcpu_spsr()
158 return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; in vcpu_spsr()
[all …]
Dhandle_exit.c31 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_svc_hyp() argument
39 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_hvc() argument
43 trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), in handle_hvc()
44 kvm_vcpu_hvc_get_imm(vcpu)); in handle_hvc()
46 ret = kvm_psci_call(vcpu); in handle_hvc()
48 kvm_inject_undefined(vcpu); in handle_hvc()
55 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_smc() argument
57 kvm_inject_undefined(vcpu); in handle_smc()
61 static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) in handle_pabt_hyp() argument
65 kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu)); in handle_pabt_hyp()
[all …]
Darm.c64 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) in kvm_arm_set_running_vcpu() argument
67 __this_cpu_write(kvm_arm_running_vcpu, vcpu); in kvm_arm_set_running_vcpu()
93 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
95 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
143 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
216 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
228 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in kvm_arch_vcpu_create()
229 if (!vcpu) { in kvm_arch_vcpu_create()
234 err = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
238 err = create_hyp_mappings(vcpu, vcpu + 1); in kvm_arch_vcpu_create()
[all …]
Dmmio.c96 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_mmio_return() argument
109 if (vcpu->arch.mmio_decode.sign_extend && in kvm_handle_mmio_return()
117 data = vcpu_data_host_to_guest(vcpu, data, len); in kvm_handle_mmio_return()
118 *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; in kvm_handle_mmio_return()
124 static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) in decode_hsr() argument
130 if (kvm_vcpu_dabt_isextabt(vcpu)) { in decode_hsr()
132 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in decode_hsr()
136 if (kvm_vcpu_dabt_iss1tw(vcpu)) { in decode_hsr()
138 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); in decode_hsr()
142 access_size = kvm_vcpu_dabt_get_as(vcpu); in decode_hsr()
[all …]
Dguest.c39 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
49 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in get_core_reg() argument
52 struct kvm_regs *regs = &vcpu->arch.regs; in get_core_reg()
66 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_core_reg() argument
69 struct kvm_regs *regs = &vcpu->arch.regs; in set_core_reg()
102 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
107 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
125 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) in copy_timer_indices() argument
139 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) in set_timer_reg() argument
149 return kvm_arm_timer_set_reg(vcpu, reg->id, val); in set_timer_reg()
[all …]
Dcoproc.c53 static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, in vcpu_cp15_reg64_set() argument
57 vcpu->arch.cp15[r->reg] = val & 0xffffffff; in vcpu_cp15_reg64_set()
58 vcpu->arch.cp15[r->reg + 1] = val >> 32; in vcpu_cp15_reg64_set()
61 static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, in vcpu_cp15_reg64_get() argument
66 val = vcpu->arch.cp15[r->reg + 1]; in vcpu_cp15_reg64_get()
68 val = val | vcpu->arch.cp15[r->reg]; in vcpu_cp15_reg64_get()
72 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp10_id() argument
74 kvm_inject_undefined(vcpu); in kvm_handle_cp10_id()
78 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_handle_cp_0_13_access() argument
84 kvm_inject_undefined(vcpu); in kvm_handle_cp_0_13_access()
[all …]
Dcoproc.h71 static inline bool ignore_write(struct kvm_vcpu *vcpu, in ignore_write() argument
77 static inline bool read_zero(struct kvm_vcpu *vcpu, in read_zero() argument
80 *vcpu_reg(vcpu, p->Rt1) = 0; in read_zero()
84 static inline bool write_to_read_only(struct kvm_vcpu *vcpu, in write_to_read_only() argument
88 *vcpu_pc(vcpu)); in write_to_read_only()
93 static inline bool read_from_write_only(struct kvm_vcpu *vcpu, in read_from_write_only() argument
97 *vcpu_pc(vcpu)); in read_from_write_only()
103 static inline void reset_unknown(struct kvm_vcpu *vcpu, in reset_unknown() argument
107 BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); in reset_unknown()
108 vcpu->arch.cp15[r->reg] = 0xdecafbad; in reset_unknown()
[all …]
Dinterrupts_head.S15 vcpu .req r0 @ vcpu pointer always in r0 label
147 add r1, vcpu, \offset
167 add r1, vcpu, #VCPU_FIQ_REGS
179 ldr r2, [vcpu, #VCPU_PC]
180 ldr r3, [vcpu, #VCPU_CPSR]
185 ldr r2, [vcpu, #VCPU_USR_SP]
186 ldr r3, [vcpu, #VCPU_USR_LR]
189 add vcpu, vcpu, #(VCPU_USR_REGS)
190 ldm vcpu, {r0-r12}
202 add r2, vcpu, \offset
[all …]
Dperf.c32 struct kvm_vcpu *vcpu; in kvm_is_user_mode() local
34 vcpu = kvm_arm_get_running_vcpu(); in kvm_is_user_mode()
36 if (vcpu) in kvm_is_user_mode()
37 return !vcpu_mode_priv(vcpu); in kvm_is_user_mode()
44 struct kvm_vcpu *vcpu; in kvm_get_guest_ip() local
46 vcpu = kvm_arm_get_running_vcpu(); in kvm_get_guest_ip()
48 if (vcpu) in kvm_get_guest_ip()
49 return *vcpu_pc(vcpu); in kvm_get_guest_ip()
Dreset.c57 int kvm_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_reset_vcpu() argument
62 switch (vcpu->arch.target) { in kvm_reset_vcpu()
66 vcpu->arch.midr = read_cpuid_id(); in kvm_reset_vcpu()
74 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); in kvm_reset_vcpu()
77 kvm_reset_coprocs(vcpu); in kvm_reset_vcpu()
80 kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); in kvm_reset_vcpu()
Dmmu.c1041 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) in kvm_is_write_fault() argument
1043 if (kvm_vcpu_trap_is_iabt(vcpu)) in kvm_is_write_fault()
1046 return kvm_vcpu_dabt_iswrite(vcpu); in kvm_is_write_fault()
1208 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, in coherent_cache_guest_page() argument
1211 __coherent_cache_guest_page(vcpu, pfn, size, uncached); in coherent_cache_guest_page()
1214 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, in user_mem_abort() argument
1222 struct kvm *kvm = vcpu->kvm; in user_mem_abort()
1223 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in user_mem_abort()
1231 write_fault = kvm_is_write_fault(vcpu); in user_mem_abort()
1271 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
[all …]
Dinterrupts.S107 @ Save the vcpu pointer
108 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
177 add r7, vcpu, #VCPU_VFP_GUEST
179 add r7, vcpu, #VCPU_VFP_HOST
291 str r2, [vcpu, #VCPU_HSR]
292 str r1, [vcpu, #VCPU_HxFAR]
297 str r2, [vcpu, #VCPU_HSR]
298 str r1, [vcpu, #VCPU_HxFAR]
408 str r1, [vcpu, #VCPU_HSR]
419 2: str r2, [vcpu, #VCPU_HxFAR]
DKconfig55 If you choose a high number, the vcpu structures will be quite
/linux-4.1.27/arch/mips/kvm/
Dmips.c62 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) in kvm_mips_reset_vcpu() argument
67 vcpu->arch.guest_kernel_asid[i] = 0; in kvm_mips_reset_vcpu()
68 vcpu->arch.guest_user_asid[i] = 0; in kvm_mips_reset_vcpu()
78 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
80 return !!(vcpu->arch.pending_exceptions); in kvm_arch_vcpu_runnable()
83 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
143 struct kvm_vcpu *vcpu; in kvm_mips_free_vcpus() local
152 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mips_free_vcpus()
153 kvm_arch_vcpu_free(vcpu); in kvm_mips_free_vcpus()
250 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); in kvm_arch_vcpu_create() local
[all …]
Demulate.c42 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, in kvm_compute_return_epc() argument
47 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_compute_return_epc()
55 insn.word = kvm_get_inst((uint32_t *) epc, vcpu); in kvm_compute_return_epc()
201 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) in update_pc() argument
207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); in update_pc()
211 vcpu->arch.pc = branch_pc; in update_pc()
213 vcpu->arch.pc); in update_pc()
216 vcpu->arch.pc += 4; in update_pc()
218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); in update_pc()
231 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) in kvm_mips_count_disabled() argument
[all …]
Dtrap_emul.c40 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) in kvm_trap_emul_handle_cop_unusable() argument
42 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_trap_emul_handle_cop_unusable()
43 struct kvm_run *run = vcpu->run; in kvm_trap_emul_handle_cop_unusable()
44 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; in kvm_trap_emul_handle_cop_unusable()
45 unsigned long cause = vcpu->arch.host_cp0_cause; in kvm_trap_emul_handle_cop_unusable()
51 if (!kvm_mips_guest_has_fpu(&vcpu->arch) || in kvm_trap_emul_handle_cop_unusable()
57 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); in kvm_trap_emul_handle_cop_unusable()
60 kvm_own_fpu(vcpu); in kvm_trap_emul_handle_cop_unusable()
64 er = kvm_mips_emulate_inst(cause, opc, run, vcpu); in kvm_trap_emul_handle_cop_unusable()
88 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) in kvm_trap_emul_handle_tlb_mod() argument
[all …]
Dinterrupt.c25 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) in kvm_mips_queue_irq() argument
27 set_bit(priority, &vcpu->arch.pending_exceptions); in kvm_mips_queue_irq()
30 void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) in kvm_mips_dequeue_irq() argument
32 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvm_mips_dequeue_irq()
35 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) in kvm_mips_queue_timer_int_cb() argument
42 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); in kvm_mips_queue_timer_int_cb()
45 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); in kvm_mips_queue_timer_int_cb()
49 void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) in kvm_mips_dequeue_timer_int_cb() argument
51 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); in kvm_mips_dequeue_timer_int_cb()
52 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); in kvm_mips_dequeue_timer_int_cb()
[all …]
Dtlb.c50 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_kernel_asid() argument
52 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_kernel_asid()
55 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_user_asid() argument
57 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; in kvm_mips_get_user_asid()
60 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) in kvm_mips_get_commpage_asid() argument
62 return vcpu->kvm->arch.commpage_tlb; in kvm_mips_get_commpage_asid()
116 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) in kvm_mips_dump_guest_tlbs() argument
118 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_dump_guest_tlbs()
126 tlb = vcpu->arch.guest_tlb[i]; in kvm_mips_dump_guest_tlbs()
168 unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, in kvm_mips_translate_guest_kseg0_to_hpa() argument
[all …]
Dinterrupt.h39 void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
40 void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
41 int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
43 void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
44 void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
45 void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
47 void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
49 int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
51 int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
53 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
Ddyntrans.c32 struct kvm_vcpu *vcpu) in kvm_mips_trans_cache_index() argument
41 (vcpu, (unsigned long) opc)); in kvm_mips_trans_cache_index()
53 struct kvm_vcpu *vcpu) in kvm_mips_trans_cache_va() argument
66 (vcpu, (unsigned long) opc)); in kvm_mips_trans_cache_va()
73 int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) in kvm_mips_trans_mfc0() argument
98 (vcpu, (unsigned long) opc)); in kvm_mips_trans_mfc0()
115 int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) in kvm_mips_trans_mtc0() argument
133 (vcpu, (unsigned long) opc)); in kvm_mips_trans_mtc0()
Dstats.c70 void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) in kvm_mips_dump_stats() argument
75 kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); in kvm_mips_dump_stats()
78 if (vcpu->arch.cop0->stat[i][j]) in kvm_mips_dump_stats()
80 vcpu->arch.cop0->stat[i][j]); in kvm_mips_dump_stats()
Dcommpage.c27 void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) in kvm_mips_commpage_init() argument
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; in kvm_mips_commpage_init()
32 vcpu->arch.cop0 = &page->cop0; in kvm_mips_commpage_init()
Dtrace.h24 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
25 TP_ARGS(vcpu, reason),
32 __entry->pc = vcpu->arch.pc;
Dcommpage.h22 extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
/linux-4.1.27/virt/kvm/
Dasync_pf.c31 static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu, in kvm_async_page_present_sync() argument
35 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_sync()
38 static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu, in kvm_async_page_present_async() argument
42 kvm_arch_async_page_present(vcpu, work); in kvm_async_page_present_async()
65 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu) in kvm_async_pf_vcpu_init() argument
67 INIT_LIST_HEAD(&vcpu->async_pf.done); in kvm_async_pf_vcpu_init()
68 INIT_LIST_HEAD(&vcpu->async_pf.queue); in kvm_async_pf_vcpu_init()
69 spin_lock_init(&vcpu->async_pf.lock); in kvm_async_pf_vcpu_init()
77 struct kvm_vcpu *vcpu = apf->vcpu; in async_pf_execute() local
84 kvm_async_page_present_sync(vcpu, apf); in async_pf_execute()
[all …]
Dkvm_main.c125 int vcpu_load(struct kvm_vcpu *vcpu) in vcpu_load() argument
129 if (mutex_lock_killable(&vcpu->mutex)) in vcpu_load()
132 preempt_notifier_register(&vcpu->preempt_notifier); in vcpu_load()
133 kvm_arch_vcpu_load(vcpu, cpu); in vcpu_load()
138 void vcpu_put(struct kvm_vcpu *vcpu) in vcpu_put() argument
141 kvm_arch_vcpu_put(vcpu); in vcpu_put()
142 preempt_notifier_unregister(&vcpu->preempt_notifier); in vcpu_put()
144 mutex_unlock(&vcpu->mutex); in vcpu_put()
156 struct kvm_vcpu *vcpu; in kvm_make_all_cpus_request() local
161 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_all_cpus_request()
[all …]
Dasync_pf.h29 void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
Dcoalesced_mmio.c63 static int coalesced_mmio_write(struct kvm_vcpu *vcpu, in coalesced_mmio_write() argument
Deventfd.c718 ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, in ioeventfd_write() argument
/linux-4.1.27/arch/x86/include/asm/
Dkvm_host.h263 void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
264 unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
265 u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
266 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
268 void (*inject_page_fault)(struct kvm_vcpu *vcpu,
270 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
272 gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
274 int (*sync_page)(struct kvm_vcpu *vcpu,
276 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
277 void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
[all …]
Dpvclock.h13 struct pvclock_vcpu_time_info *vcpu,
/linux-4.1.27/virt/kvm/arm/
Dvgic.c83 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
84 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
85 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
86 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
91 static void add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) in add_sgi_source() argument
93 vcpu->kvm->arch.vgic.vm_ops.add_sgi_source(vcpu, irq, source); in add_sgi_source()
96 static bool queue_sgi(struct kvm_vcpu *vcpu, int irq) in queue_sgi() argument
98 return vcpu->kvm->arch.vgic.vm_ops.queue_sgi(vcpu, irq); in queue_sgi()
246 static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq) in vgic_irq_is_edge() argument
248 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; in vgic_irq_is_edge()
[all …]
Darch_timer.c62 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu) in kvm_timer_inject_irq() argument
65 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; in kvm_timer_inject_irq()
68 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_timer_inject_irq()
76 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; in kvm_arch_timer_handler() local
84 pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); in kvm_arch_timer_handler()
94 struct kvm_vcpu *vcpu; in kvm_timer_inject_irq_work() local
96 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); in kvm_timer_inject_irq_work()
97 vcpu->arch.timer_cpu.armed = false; in kvm_timer_inject_irq_work()
103 kvm_vcpu_kick(vcpu); in kvm_timer_inject_irq_work()
114 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) in kvm_timer_should_fire() argument
[all …]
Dvgic-v2-emul.c37 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
43 static bool handle_mmio_misc(struct kvm_vcpu *vcpu, in handle_mmio_misc() argument
51 reg = vcpu->kvm->arch.vgic.enabled; in handle_mmio_misc()
55 vcpu->kvm->arch.vgic.enabled = reg & 1; in handle_mmio_misc()
56 vgic_update_state(vcpu->kvm); in handle_mmio_misc()
62 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; in handle_mmio_misc()
63 reg |= (vcpu->kvm->arch.vgic.nr_irqs >> 5) - 1; in handle_mmio_misc()
78 static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu, in handle_mmio_set_enable_reg() argument
82 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_enable_reg()
83 vcpu->vcpu_id, ACCESS_WRITE_SETBIT); in handle_mmio_set_enable_reg()
[all …]
Dvgic-v3.c47 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_v3_get_lr() argument
50 u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; in vgic_v3_get_lr()
52 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) in vgic_v3_get_lr()
59 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) in vgic_v3_get_lr()
74 static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_v3_set_lr() argument
87 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) in vgic_v3_set_lr()
99 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; in vgic_v3_set_lr()
102 static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, in vgic_v3_sync_lr_elrsr() argument
106 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); in vgic_v3_sync_lr_elrsr()
108 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr); in vgic_v3_sync_lr_elrsr()
[all …]
Dvgic-v2.c33 static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) in vgic_v2_get_lr() argument
36 u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; in vgic_v2_get_lr()
55 static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, in vgic_v2_set_lr() argument
67 vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; in vgic_v2_set_lr()
70 static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, in vgic_v2_sync_lr_elrsr() argument
74 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr); in vgic_v2_sync_lr_elrsr()
76 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); in vgic_v2_sync_lr_elrsr()
79 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) in vgic_v2_get_elrsr() argument
81 return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; in vgic_v2_get_elrsr()
84 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) in vgic_v2_get_eisr() argument
[all …]
Dvgic-v3-emul.c52 static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu, in handle_mmio_rao_wi() argument
63 static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu, in handle_mmio_ctlr() argument
72 if (vcpu->kvm->arch.vgic.enabled) in handle_mmio_ctlr()
81 vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1); in handle_mmio_ctlr()
82 vgic_update_state(vcpu->kvm); in handle_mmio_ctlr()
95 static bool handle_mmio_typer(struct kvm_vcpu *vcpu, in handle_mmio_typer() argument
100 reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1; in handle_mmio_typer()
110 static bool handle_mmio_iidr(struct kvm_vcpu *vcpu, in handle_mmio_iidr() argument
122 static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu, in handle_mmio_set_enable_reg_dist() argument
127 return vgic_handle_enable_reg(vcpu->kvm, mmio, offset, in handle_mmio_set_enable_reg_dist()
[all …]
Dvgic.h50 void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq);
51 void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq);
52 void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq);
56 void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
57 void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
59 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
60 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
72 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
91 bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
/linux-4.1.27/arch/mips/include/asm/
Dkvm_host.h70 #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ER… argument
71 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
574 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_can_have_fpu() argument
577 vcpu->fpu_enabled; in kvm_mips_guest_can_have_fpu()
580 static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_has_fpu() argument
582 return kvm_mips_guest_can_have_fpu(vcpu) && in kvm_mips_guest_has_fpu()
583 kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; in kvm_mips_guest_has_fpu()
586 static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_can_have_msa() argument
589 vcpu->msa_enabled; in kvm_mips_guest_can_have_msa()
592 static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) in kvm_mips_guest_has_msa() argument
[all …]
/linux-4.1.27/include/linux/
Dkvm_host.h168 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
170 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
172 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
184 struct kvm_vcpu *vcpu; member
192 void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
193 void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
194 int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
196 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
273 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) in kvm_vcpu_exiting_guest_mode() argument
275 return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); in kvm_vcpu_exiting_guest_mode()
[all …]
/linux-4.1.27/arch/x86/xen/
Dirq.c28 struct vcpu_info *vcpu; in xen_save_fl() local
31 vcpu = this_cpu_read(xen_vcpu); in xen_save_fl()
34 flags = !vcpu->evtchn_upcall_mask; in xen_save_fl()
46 struct vcpu_info *vcpu; in xen_restore_fl() local
53 vcpu = this_cpu_read(xen_vcpu); in xen_restore_fl()
54 vcpu->evtchn_upcall_mask = flags; in xen_restore_fl()
58 if (unlikely(vcpu->evtchn_upcall_pending)) in xen_restore_fl()
79 struct vcpu_info *vcpu; in xen_irq_enable() local
88 vcpu = this_cpu_read(xen_vcpu); in xen_irq_enable()
89 vcpu->evtchn_upcall_mask = 0; in xen_irq_enable()
[all …]
Dxen-ops.h70 bool xen_vcpu_stolen(int vcpu);
Dtime.c99 bool xen_vcpu_stolen(int vcpu) in xen_vcpu_stolen() argument
101 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; in xen_vcpu_stolen()
/linux-4.1.27/include/kvm/
Darm_vgic.h116 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
117 u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
118 void (*clear_eisr)(struct kvm_vcpu *vcpu);
119 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
120 void (*enable_underflow)(struct kvm_vcpu *vcpu);
121 void (*disable_underflow)(struct kvm_vcpu *vcpu);
122 void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
123 void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
124 void (*enable)(struct kvm_vcpu *vcpu);
322 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
[all …]
Darm_arch_timer.h60 void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
62 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
63 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
64 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
65 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
70 bool kvm_timer_should_fire(struct kvm_vcpu *vcpu);
Diodev.h30 int (*read)(struct kvm_vcpu *vcpu,
35 int (*write)(struct kvm_vcpu *vcpu,
54 static inline int kvm_iodevice_read(struct kvm_vcpu *vcpu, in kvm_iodevice_read() argument
58 return dev->ops->read ? dev->ops->read(vcpu, dev, addr, l, v) in kvm_iodevice_read()
62 static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu, in kvm_iodevice_write() argument
66 return dev->ops->write ? dev->ops->write(vcpu, dev, addr, l, v) in kvm_iodevice_write()
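
Aside: the two kvm_io_device_ops callbacks shown above take the vcpu as their first parameter (the 4.1-era signature). A minimal, purely hypothetical device on that interface — not taken from the tree — would look roughly like this:

        /* Sketch only: a reads-as-zero, write-and-discard device. */
        #include <linux/string.h>
        #include <kvm/iodev.h>

        static int null_dev_read(struct kvm_vcpu *vcpu,
                                 struct kvm_io_device *dev,
                                 gpa_t addr, int len, void *val)
        {
                memset(val, 0, len);    /* reads return zero */
                return 0;               /* 0 = access handled */
        }

        static int null_dev_write(struct kvm_vcpu *vcpu,
                                  struct kvm_io_device *dev,
                                  gpa_t addr, int len, const void *val)
        {
                return 0;               /* accept and discard */
        }

        static const struct kvm_io_device_ops null_dev_ops = {
                .read   = null_dev_read,
                .write  = null_dev_write,
        };

A handler returns 0 when it claims the access and -EOPNOTSUPP to let the bus try the next device; that is the convention the coalesced_mmio and ioeventfd handlers matched by this search follow as well.
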
/linux-4.1.27/tools/perf/
Dbuiltin-kvm.c143 event->vcpu[j].time = 0; in clear_events_cache_stats()
144 init_stats(&event->vcpu[j].stats); in clear_events_cache_stats()
167 prev = event->vcpu; in kvm_event_expand()
168 event->vcpu = realloc(event->vcpu, in kvm_event_expand()
169 event->max_vcpu * sizeof(*event->vcpu)); in kvm_event_expand()
170 if (!event->vcpu) { in kvm_event_expand()
176 memset(event->vcpu + old_max_vcpu, 0, in kvm_event_expand()
177 (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); in kvm_event_expand()
244 kvm_stats = &event->vcpu[vcpu_id]; in kvm_event_rel_stddev()
261 kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); in update_kvm_event()
[all …]
/linux-4.1.27/arch/powerpc/perf/
Dhv-24x7-domains.h25 DOMAIN(VCPU_HOME_CORE, 0x03, vcpu, false)
26 DOMAIN(VCPU_HOME_CHIP, 0x04, vcpu, false)
27 DOMAIN(VCPU_HOME_NODE, 0x05, vcpu, false)
28 DOMAIN(VCPU_REMOTE_NODE, 0x06, vcpu, false)
Dhv-24x7.c104 EVENT_DEFINE_RANGE_FORMAT(vcpu, config, 16, 31);
/linux-4.1.27/arch/s390/include/asm/
Dkvm_host.h476 #define guestdbg_enabled(vcpu) \ argument
477 (vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
478 #define guestdbg_sstep_enabled(vcpu) \ argument
479 (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
480 #define guestdbg_hw_bp_enabled(vcpu) \ argument
481 (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
482 #define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \ argument
483 (vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
615 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
617 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
[all …]
/linux-4.1.27/include/xen/interface/
Devent_channel.h63 uint32_t vcpu; member
92 uint32_t vcpu; member
139 uint32_t vcpu; /* VCPU to which this channel is bound. */ member
167 uint32_t vcpu; member
201 uint32_t vcpu; member
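
For orientation: each of these vcpu members selects the vcpu an event channel is bound to or delivered on. A sketch of the bind-VIRQ case, assuming the usual HYPERVISOR_event_channel_op() hypercall wrapper from <asm/xen/hypercall.h> (the same pattern as the bind_virq assignments in events_base.c further down):

        #include <xen/interface/xen.h>
        #include <xen/interface/event_channel.h>
        #include <asm/xen/hypercall.h>

        static int bind_timer_virq(unsigned int cpu, evtchn_port_t *port)
        {
                struct evtchn_bind_virq bind_virq = {
                        .virq = VIRQ_TIMER,
                        .vcpu = cpu,    /* deliver upcalls on this vcpu */
                };
                int rc;

                rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                 &bind_virq);
                if (rc == 0)
                        *port = bind_virq.port; /* filled in by Xen */
                return rc;
        }
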
/linux-4.1.27/Documentation/virtual/kvm/
Dapi.txt21 - vcpu ioctls: These query and set attributes that control the operation
24 Only run vcpu ioctls from the same thread that was used to create the
25 vcpu.
36 and return a file descriptor pointing to it. Finally, ioctls on a vcpu
37 fd can be used to control the vcpu, including the important task of
45 and one vcpu per thread.
81 Type: system, vm, or vcpu.
176 Returns: size of vcpu mmap area, in bytes
199 Parameters: vcpu id (apic id on x86)
200 Returns: vcpu fd on success, -1 on error
[all …]
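
Taken together, the api.txt lines above describe the three-level fd hierarchy (system, vm, vcpu) and the per-vcpu mmap area. A minimal userspace sketch of that flow — error handling and all guest memory/register setup omitted, so this is plumbing only, not a runnable guest:

        #include <fcntl.h>
        #include <linux/kvm.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>

        int main(void)
        {
                int kvm  = open("/dev/kvm", O_RDWR);      /* system ioctls */
                int vm   = ioctl(kvm, KVM_CREATE_VM, 0);  /* vm ioctls     */
                int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0); /* vcpu id 0     */

                /* "size of vcpu mmap area, in bytes" */
                long sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
                struct kvm_run *run = mmap(NULL, sz,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, vcpu, 0);

                /* run vcpu ioctls only from the thread that created it */
                ioctl(vcpu, KVM_RUN, 0);        /* exit info lands in *run */
                return run->exit_reason;
        }

Memory regions and register state would have to be configured before KVM_RUN; the point here is only how the vcpu fd and its mmap'd kvm_run structure fit together.
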
Dhypercalls.txt75 Purpose: Hypercall used to wakeup a vcpu from HLT state
76 Usage example : A vcpu of a paravirtualized guest that is busywaiting in guest
80 the vcpu to sleep until occurrence of an appropriate event. Another vcpu of the
81 same guest can wakeup the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall,
82 specifying APIC ID (a1) of the vcpu to be woken up. An additional argument (a0)
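
On the guest side this amounts to a single hypercall. A sketch, assuming the kvm_hypercall2() wrapper from <asm/kvm_para.h>; a0 is passed as 0 here, since the excerpt leaves its meaning open:

        #include <linux/types.h>
        #include <asm/kvm_para.h>

        static void kick_vcpu(u32 apicid)
        {
                unsigned long flags = 0;        /* a0: no flags defined */

                /* a1: APIC ID of the halted vcpu to wake */
                kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
        }
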
Dmsr.txt120 | | guest vcpu has been paused by
170 when asynchronous page faults are enabled on the vcpu 0 when
172 when vcpu is in cpl == 0.
191 kind of token 0xffffffff which tells vcpu that it should wake
198 Currently type 2 APF will be always delivered on the same vcpu as
232 nanoseconds. Time during which the vcpu is idle, will not be
236 data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
Dlocking.txt56 mark_page_dirty(vcpu->kvm, gfn1)
167 The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
Dmmu.txt17 - scaling: need to scale to large memory and large vcpu guests
288 - cache the information to vcpu->arch.mmio_gva, vcpu->arch.access and
289 vcpu->arch.mmio_gfn, and call the emulator
298 vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn
/linux-4.1.27/Documentation/virtual/kvm/devices/
Dvm.txt17 Returns: -EBUSY if a vcpu is already defined, otherwise 0
33 -EBUSY if a vcpu is already defined
65 Allows user space to retrieve or request to change cpu related information for a vcpu:
68 __u64 cpuid; # CPUID currently (to be) used by this vcpu
69 __u16 ibc; # IBC level currently (to be) used by this vcpu
72 # by this vcpu
Dxics.txt16 capability for each vcpu, specifying KVM_CAP_IRQ_XICS in args[0] and
17 the interrupt server number (i.e. the vcpu number from the XICS's
20 KVM_GET_ONE_REG and KVM_SET_ONE_REG ioctls on the vcpu. The 64 bit
47 interrupt server number specified for the destination vcpu.
Dmpic.txt10 vcpu's interrupt inputs.
/linux-4.1.27/tools/perf/Documentation/
Dperf-kvm.txt102 --vcpu=<value>::
103 analyze events which occur on this vcpu. (default: all vcpus)
136 --vcpu=<value>::
137 analyze events which occur on this vcpu. (default: all vcpus)
/linux-4.1.27/tools/perf/util/
Dkvm-stat.h33 struct kvm_event_stats *vcpu; member
/linux-4.1.27/Documentation/s390/
Dkvm.txt10 kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
73 3. ioctl calls to the kvm-vcpu file descriptor
85 kvm-vcpu file descriptor are supported:
/linux-4.1.27/drivers/xen/events/
Devents_base.c897 bind_ipi.vcpu = cpu; in bind_ipi_to_irq()
950 if (status.u.virq == virq && status.vcpu == cpu) { in find_virq()
993 bind_virq.vcpu = cpu; in bind_virq_to_irq()
1324 bind_vcpu.vcpu = tcpu; in rebind_irq_to_cpu()
1464 bind_virq.vcpu = cpu; in restore_cpu_virqs()
1488 bind_ipi.vcpu = cpu; in restore_cpu_ipis()
Devents_fifo.c116 init_control.vcpu = cpu; in init_control_block()
/linux-4.1.27/drivers/iommu/
Dfsl_pamu.h405 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
Dfsl_pamu.c519 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) in get_stash_id() argument
547 if (be32_to_cpup(&prop[i]) == vcpu) { in get_stash_id()
587 stash_dest_hint, vcpu); in get_stash_id()
/linux-4.1.27/drivers/xen/
Devtchn.c357 bind_virq.vcpu = 0; in evtchn_ioctl()
/linux-4.1.27/arch/arm/boot/dts/
Dstih416-clock.dtsi654 "clk-m-fvdp-vcpu",
/linux-4.1.27/Documentation/RCU/
DRTFP.txt2436 ,Title="[{PATCH} 37/40] {KVM}: Bump maximum vcpu count to 64"