Lines matching refs:vcpu: cross-reference hits for the vcpu identifier in KVM's x86 CPUID code (arch/x86/kvm/cpuid.c). The leading number on each hit is its source line number; "argument" marks lines where vcpu is declared as a function parameter.

61 int kvm_update_cpuid(struct kvm_vcpu *vcpu)  in kvm_update_cpuid()  argument
64 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_update_cpuid()
66 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid()
73 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) in kvm_update_cpuid()
84 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid()
86 vcpu->arch.guest_supported_xcr0 = 0; in kvm_update_cpuid()
87 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_update_cpuid()
89 vcpu->arch.guest_supported_xcr0 = in kvm_update_cpuid()
92 vcpu->arch.guest_xstate_size = best->ebx = in kvm_update_cpuid()
93 xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid()
96 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); in kvm_update_cpuid()
98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); in kvm_update_cpuid()
100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); in kvm_update_cpuid()
101 if (vcpu->arch.eager_fpu) in kvm_update_cpuid()
102 kvm_x86_ops->fpu_activate(vcpu); in kvm_update_cpuid()
108 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in kvm_update_cpuid()
114 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_update_cpuid()
116 kvm_pmu_refresh(vcpu); in kvm_update_cpuid()
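
kvm_update_cpuid() re-derives cached per-vcpu state whenever userspace rewrites the CPUID table: it mirrors CR4.OSXSAVE into leaf 1, recomputes the guest's supported XCR0 and the XSAVE buffer size from leaf 0xD (subleaves 0 and 1), forces eager FPU switching when the guest has MPX, re-caches MAXPHYADDR, and refreshes the vPMU. Below is a minimal userspace model of the xstate_required_size() calculation the lines above call into; the kernel reads each component's size/offset from CPUID leaf 0xD itself, so the table here is an illustrative stand-in, not the hardware layout.

    #include <stdint.h>
    #include <stdio.h>

    #define XSAVE_HDR_OFFSET 512    /* legacy FXSAVE region: bytes 0..511 */
    #define XSAVE_HDR_SIZE    64    /* XSAVE header follows it            */

    /* illustrative offsets/sizes for components 2..4
     * (AVX, MPX BNDREGS, MPX BNDCSR) */
    static const struct { uint32_t offset, size; } comp[] = {
        [2] = { 576,  256 },
        [3] = { 960,   64 },
        [4] = { 1024,  64 },
    };

    static uint32_t xstate_required_size(uint64_t xcr0, int compacted)
    {
        uint32_t ret = XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE;
        uint64_t xstate_bv = xcr0 >> 2;  /* x87/SSE live in the legacy area */
        uint32_t bit = 2;

        for (; xstate_bv; xstate_bv >>= 1, bit++) {
            if (!(xstate_bv & 1) || bit >= 5)
                continue;
            if (compacted)
                ret += comp[bit].size;                   /* packed layout */
            else
                ret = comp[bit].offset + comp[bit].size; /* fixed layout  */
        }
        return ret;
    }

    int main(void)
    {
        /* x87 | SSE | AVX: 512 legacy + 64 header + 256 AVX = 832 bytes */
        printf("%u\n", xstate_required_size(0x7, 0));
        return 0;
    }
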
128 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) in cpuid_fix_nx_cap() argument
134 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { in cpuid_fix_nx_cap()
135 e = &vcpu->arch.cpuid_entries[i]; in cpuid_fix_nx_cap()
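
cpuid_fix_nx_cap() scans the table for leaf 0x80000001 and clears the NX capability bit when the host runs with EFER.NX disabled, so the guest never advertises a feature the host cannot back. A minimal model, assuming the architectural bit position (EDX bit 20):

    #include <stdint.h>

    struct cpuid_entry { uint32_t function, eax, ebx, ecx, edx; };

    #define F_NX (1u << 20)   /* CPUID.80000001H:EDX.NX */

    static void fix_nx_cap(struct cpuid_entry *e, int nent, int host_has_nx)
    {
        for (int i = 0; i < nent; i++) {
            if (e[i].function == 0x80000001) {
                if (!host_has_nx)
                    e[i].edx &= ~F_NX;   /* hide NX from the guest */
                break;
            }
        }
    }
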
147 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) in cpuid_query_maxphyaddr() argument
151 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); in cpuid_query_maxphyaddr()
154 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in cpuid_query_maxphyaddr()
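
cpuid_query_maxphyaddr() reads the guest's physical address width from CPUID.80000008H:EAX[7:0], but only after confirming via leaf 0x80000000 that the extended leaf actually exists; otherwise it falls back to the architectural default of 36 bits. A self-contained model, with a hypothetical lookup table standing in for the vcpu's CPUID entries:

    #include <stdint.h>
    #include <stddef.h>

    struct cpuid_entry { uint32_t function, eax, ebx, ecx, edx; };

    /* hypothetical table + lookup standing in for kvm_find_cpuid_entry() */
    static const struct cpuid_entry table[] = {
        { 0x80000000, 0x80000008, 0, 0, 0 },
        { 0x80000008, 0x0000302e, 0, 0, 0 },   /* EAX[7:0] = 0x2e = 46 */
    };

    static const struct cpuid_entry *find(uint32_t function)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].function == function)
                return &table[i];
        return NULL;
    }

    static int query_maxphyaddr(void)
    {
        const struct cpuid_entry *best = find(0x80000000);

        if (!best || best->eax < 0x80000008)
            return 36;                          /* architectural default */
        best = find(0x80000008);
        return best ? (int)(best->eax & 0xff) : 36;   /* EAX[7:0] */
    }
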
163 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_cpuid() argument
182 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; in kvm_vcpu_ioctl_set_cpuid()
183 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; in kvm_vcpu_ioctl_set_cpuid()
184 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; in kvm_vcpu_ioctl_set_cpuid()
185 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; in kvm_vcpu_ioctl_set_cpuid()
186 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx; in kvm_vcpu_ioctl_set_cpuid()
187 vcpu->arch.cpuid_entries[i].index = 0; in kvm_vcpu_ioctl_set_cpuid()
188 vcpu->arch.cpuid_entries[i].flags = 0; in kvm_vcpu_ioctl_set_cpuid()
189 vcpu->arch.cpuid_entries[i].padding[0] = 0; in kvm_vcpu_ioctl_set_cpuid()
190 vcpu->arch.cpuid_entries[i].padding[1] = 0; in kvm_vcpu_ioctl_set_cpuid()
191 vcpu->arch.cpuid_entries[i].padding[2] = 0; in kvm_vcpu_ioctl_set_cpuid()
193 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid()
194 cpuid_fix_nx_cap(vcpu); in kvm_vcpu_ioctl_set_cpuid()
195 kvm_apic_set_version(vcpu); in kvm_vcpu_ioctl_set_cpuid()
196 kvm_x86_ops->cpuid_update(vcpu); in kvm_vcpu_ioctl_set_cpuid()
197 r = kvm_update_cpuid(vcpu); in kvm_vcpu_ioctl_set_cpuid()
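
kvm_vcpu_ioctl_set_cpuid() services the legacy KVM_SET_CPUID ioctl. The old entry format carries no subleaf index and no flags, which is why the loop above copies the four data registers and explicitly zeroes index, flags, and padding while widening each entry into the modern format. For reference, the two layouts as declared in the KVM UAPI headers:

    #include <linux/types.h>   /* __u32 */

    struct kvm_cpuid_entry {          /* legacy KVM_SET_CPUID format */
        __u32 function;
        __u32 eax;
        __u32 ebx;
        __u32 ecx;
        __u32 edx;
        __u32 padding;
    };

    struct kvm_cpuid_entry2 {         /* KVM_SET_CPUID2 format */
        __u32 function;
        __u32 index;                  /* subleaf (ECX input)   */
        __u32 flags;                  /* KVM_CPUID_FLAG_*      */
        __u32 eax;
        __u32 ebx;
        __u32 ecx;
        __u32 edx;
        __u32 padding[3];
    };
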
205 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_cpuid2() argument
215 if (copy_from_user(&vcpu->arch.cpuid_entries, entries, in kvm_vcpu_ioctl_set_cpuid2()
218 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid2()
219 kvm_apic_set_version(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
220 kvm_x86_ops->cpuid_update(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
221 r = kvm_update_cpuid(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
226 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_cpuid2() argument
233 if (cpuid->nent < vcpu->arch.cpuid_nent) in kvm_vcpu_ioctl_get_cpuid2()
236 if (copy_to_user(entries, &vcpu->arch.cpuid_entries, in kvm_vcpu_ioctl_get_cpuid2()
237 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) in kvm_vcpu_ioctl_get_cpuid2()
242 cpuid->nent = vcpu->arch.cpuid_nent; in kvm_vcpu_ioctl_get_cpuid2()
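
KVM_SET_CPUID2/KVM_GET_CPUID2 are the modern pair: set copies the whole kvm_cpuid_entry2 array in, get bounces it back (failing when the caller's buffer is too small, per the nent check above). A typical VMM usage sketch, with error handling abbreviated; kvm_fd is the /dev/kvm system fd and vcpu_fd a vcpu fd:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define NENT 100   /* enough entries for typical hosts */

    static int set_default_cpuid(int kvm_fd, int vcpu_fd)
    {
        struct kvm_cpuid2 *cpuid;
        int ret = -1;

        cpuid = calloc(1, sizeof(*cpuid) +
                          NENT * sizeof(struct kvm_cpuid_entry2));
        if (!cpuid)
            return -1;
        cpuid->nent = NENT;

        /* ask the kernel which leaves it can virtualize ... */
        if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0 &&
            /* ... and install them verbatim; this lands in
             * kvm_vcpu_ioctl_set_cpuid2() and then kvm_update_cpuid() */
            ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid) == 0)
            ret = 0;

        free(cpuid);
        return ret;
    }
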
737 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) in move_to_next_stateful_cpuid_entry() argument
739 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; in move_to_next_stateful_cpuid_entry()
740 int j, nent = vcpu->arch.cpuid_nent; in move_to_next_stateful_cpuid_entry()
745 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; in move_to_next_stateful_cpuid_entry()
769 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, in kvm_find_cpuid_entry() argument
775 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { in kvm_find_cpuid_entry()
778 e = &vcpu->arch.cpuid_entries[i]; in kvm_find_cpuid_entry()
781 move_to_next_stateful_cpuid_entry(vcpu, i); in kvm_find_cpuid_entry()
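
kvm_find_cpuid_entry() linearly scans the entry array, matching on function and, when the entry's KVM_CPUID_FLAG_SIGNIFCANT_INDEX flag is set (the misspelling is the actual UAPI name), on the subleaf index as well; entries marked stateful are additionally rotated through move_to_next_stateful_cpuid_entry() so repeated CPUID executions step through successive entries (historically leaf 2's iteration protocol). A simplified model of the matching rule, with the stateful rotation omitted:

    #include <stdint.h>
    #include <stddef.h>

    #define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1u << 0)  /* (sic) UAPI name */

    struct entry2 { uint32_t function, index, flags, eax, ebx, ecx, edx; };

    static int entry_matches(const struct entry2 *e, uint32_t fn, uint32_t idx)
    {
        if (e->function != fn)
            return 0;
        /* the subleaf only participates when the entry marks it significant */
        if (e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)
            return e->index == idx;
        return 1;
    }

    static const struct entry2 *find(const struct entry2 *t, size_t n,
                                     uint32_t fn, uint32_t idx)
    {
        for (size_t i = 0; i < n; i++)
            if (entry_matches(&t[i], fn, idx))
                return &t[i];
        return NULL;
    }
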
795 static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, in check_cpuid_limit() argument
800 maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); in check_cpuid_limit()
804 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0); in check_cpuid_limit()
808 return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); in check_cpuid_limit()
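
check_cpuid_limit() reproduces what real CPUs do for out-of-range leaves: a query above the highest advertised basic (or extended) leaf is answered as if the highest one had been asked for. The `function & 0x80000000` trick picks leaf 0 or leaf 0x80000000 as the range's "max level" reporter. A compact model; the simplified find() here ignores subleaves (see the previous sketch for the full rule):

    #include <stdint.h>
    #include <stddef.h>

    struct entry2 { uint32_t function, index, flags, eax, ebx, ecx, edx; };

    static const struct entry2 *find(const struct entry2 *t, size_t n,
                                     uint32_t fn, uint32_t idx)
    {
        (void)idx;   /* subleaf matching omitted in this sketch */
        for (size_t i = 0; i < n; i++)
            if (t[i].function == fn)
                return &t[i];
        return NULL;
    }

    static const struct entry2 *check_limit(const struct entry2 *t, size_t n,
                                            uint32_t function, uint32_t index)
    {
        /* leaf 0 caps the basic range, 0x80000000 the extended range */
        const struct entry2 *max = find(t, n, function & 0x80000000, 0);

        if (!max || max->eax >= function)
            return NULL;              /* in range, or no table at all */
        if (function & 0x80000000) {
            max = find(t, n, 0, 0);   /* no extended leaves: clamp to
                                       * the basic maximum instead    */
            if (!max)
                return NULL;
        }
        return find(t, n, max->eax, index);
    }
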
811 void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) in kvm_cpuid() argument
816 best = kvm_find_cpuid_entry(vcpu, function, index); in kvm_cpuid()
819 best = check_cpuid_limit(vcpu, function, index); in kvm_cpuid()
824 if (is_guest_mode(vcpu) && function == 0xa) in kvm_cpuid()
838 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) in kvm_emulate_cpuid() argument
842 function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_emulate_cpuid()
843 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_emulate_cpuid()
844 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx); in kvm_emulate_cpuid()
845 kvm_register_write(vcpu, VCPU_REGS_RAX, eax); in kvm_emulate_cpuid()
846 kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); in kvm_emulate_cpuid()
847 kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); in kvm_emulate_cpuid()
848 kvm_register_write(vcpu, VCPU_REGS_RDX, edx); in kvm_emulate_cpuid()
849 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_cpuid()
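
kvm_emulate_cpuid() is the instruction-level entry point: it pulls the leaf from RAX and the subleaf from RCX, resolves them through kvm_cpuid(), writes all four output registers back, and skips the emulated instruction to advance RIP. kvm_cpuid() itself tries an exact lookup, then the out-of-range clamp, and finally zeros; the listing also shows leaf 0xa being special-cased for nested guests (source line 824), where the architectural PMU was not supported for L2 at this point. A model of the resolve step, building on struct entry2, find(), and check_limit() from the sketch just above:

    static void model_kvm_cpuid(const struct entry2 *t, size_t n,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
    {
        /* RAX selects the leaf, RCX the subleaf */
        const struct entry2 *best = find(t, n, *eax, *ecx);

        if (!best)
            best = check_limit(t, n, *eax, *ecx);   /* out-of-range clamp */
        if (best) {
            *eax = best->eax; *ebx = best->ebx;
            *ecx = best->ecx; *edx = best->edx;
        } else {
            *eax = *ebx = *ecx = *edx = 0;          /* unknown leaf */
        }
        /* kvm_emulate_cpuid() then writes RAX..RDX back with
         * kvm_register_write() and calls skip_emulated_instruction()
         * to move RIP past the two-byte CPUID opcode */
    }
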