Lines matching refs:vcpu — identifier cross-reference for KVM's x86 CPUID handling (arch/x86/kvm/cpuid.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where vcpu is declared as a function parameter. Condensed sketches of each function are interleaved below for context; they are reconstructions from the matched lines, and details vary by kernel version.

61 int kvm_update_cpuid(struct kvm_vcpu *vcpu)  in kvm_update_cpuid()  argument
64 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_update_cpuid()
66 best = kvm_find_cpuid_entry(vcpu, 1, 0); in kvm_update_cpuid()
73 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) in kvm_update_cpuid()
84 best = kvm_find_cpuid_entry(vcpu, 0xD, 0); in kvm_update_cpuid()
86 vcpu->arch.guest_supported_xcr0 = 0; in kvm_update_cpuid()
87 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_update_cpuid()
89 vcpu->arch.guest_supported_xcr0 = in kvm_update_cpuid()
92 vcpu->arch.guest_xstate_size = best->ebx = in kvm_update_cpuid()
93 xstate_required_size(vcpu->arch.xcr0, false); in kvm_update_cpuid()
96 best = kvm_find_cpuid_entry(vcpu, 0xD, 1); in kvm_update_cpuid()
98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); in kvm_update_cpuid()
100 vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu); in kvm_update_cpuid()
106 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in kvm_update_cpuid()
112 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_update_cpuid()
114 kvm_pmu_cpuid_update(vcpu); in kvm_update_cpuid()
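
The matches above all fall inside kvm_update_cpuid(), which recomputes per-vcpu state whenever the CPUID table changes. A condensed sketch stitched from the matched lines (roughly the v4.1-era source), with unmatched branches elided or paraphrased in comments; F() is cpuid.c's local feature-bit helper:

int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return 0;

        /* Mirror CR4.OSXSAVE into the guest-visible CPUID.1:ECX bit. */
        best->ecx &= ~F(OSXSAVE);
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                best->ecx |= F(OSXSAVE);

        /* apic: the LAPIC timer mode mask is chosen here from the
         * TSC_DEADLINE_TIMER bit in best->ecx (branch elided). */

        /* Derive the guest's XCR0 mask and XSAVE area size from leaf 0xD. */
        best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
        if (!best) {
                vcpu->arch.guest_supported_xcr0 = 0;
                vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
        } else {
                vcpu->arch.guest_supported_xcr0 =
                        (best->eax | ((u64)best->edx << 32)) &
                        kvm_supported_xcr0();
                vcpu->arch.guest_xstate_size = best->ebx =
                        xstate_required_size(vcpu->arch.xcr0, false);
        }

        /* Sub-leaf 1 reports the compacted (XSAVES/XSAVEC) save-area size. */
        best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
        if (best)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

        /* MPX guests keep the guest FPU loaded eagerly. */
        vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);

        /* Leaf 0x80000008 carries the address widths; cache MAXPHYADDR. */
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        /* ... sanity checks on best elided ... */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

        kvm_pmu_cpuid_update(vcpu);
        return 0;
}
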
126 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu) in cpuid_fix_nx_cap() argument
132 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { in cpuid_fix_nx_cap()
133 e = &vcpu->arch.cpuid_entries[i]; in cpuid_fix_nx_cap()
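
cpuid_fix_nx_cap() scans the table userspace installed and, if the host lacks EFER.NX, clears the NX bit the guest would otherwise see in leaf 0x80000001. A sketch built around the matched loop; the exact log message and bit macro may differ by version:

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *e, *entry = NULL;
        int i;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                e = &vcpu->arch.cpuid_entries[i];
                if (e->function == 0x80000001) {
                        entry = e;
                        break;
                }
        }
        /* NX is bit 20 of CPUID.80000001:EDX. */
        if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
                entry->edx &= ~(1 << 20);
                printk(KERN_INFO "kvm: guest NX capability removed\n");
        }
}
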
145 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) in cpuid_query_maxphyaddr() argument
149 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0); in cpuid_query_maxphyaddr()
152 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); in cpuid_query_maxphyaddr()
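
cpuid_query_maxphyaddr() answers "how wide are guest physical addresses?" from the guest's own CPUID table, falling back to 36 bits when leaf 0x80000008 is absent (the historical default for PAE-capable CPUs). Sketch:

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
        if (!best || best->eax < 0x80000008)
                goto not_found;
        best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
        if (best)
                return best->eax & 0xff;        /* EAX[7:0] = MAXPHYADDR */
not_found:
        return 36;
}
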
161 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_cpuid() argument
180 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; in kvm_vcpu_ioctl_set_cpuid()
181 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; in kvm_vcpu_ioctl_set_cpuid()
182 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx; in kvm_vcpu_ioctl_set_cpuid()
183 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx; in kvm_vcpu_ioctl_set_cpuid()
184 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx; in kvm_vcpu_ioctl_set_cpuid()
185 vcpu->arch.cpuid_entries[i].index = 0; in kvm_vcpu_ioctl_set_cpuid()
186 vcpu->arch.cpuid_entries[i].flags = 0; in kvm_vcpu_ioctl_set_cpuid()
187 vcpu->arch.cpuid_entries[i].padding[0] = 0; in kvm_vcpu_ioctl_set_cpuid()
188 vcpu->arch.cpuid_entries[i].padding[1] = 0; in kvm_vcpu_ioctl_set_cpuid()
189 vcpu->arch.cpuid_entries[i].padding[2] = 0; in kvm_vcpu_ioctl_set_cpuid()
191 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid()
192 cpuid_fix_nx_cap(vcpu); in kvm_vcpu_ioctl_set_cpuid()
193 kvm_apic_set_version(vcpu); in kvm_vcpu_ioctl_set_cpuid()
194 kvm_x86_ops->cpuid_update(vcpu); in kvm_vcpu_ioctl_set_cpuid()
195 r = kvm_update_cpuid(vcpu); in kvm_vcpu_ioctl_set_cpuid()
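
kvm_vcpu_ioctl_set_cpuid() services the legacy KVM_SET_CPUID ioctl: it widens old struct kvm_cpuid_entry records into the kvm_cpuid_entry2 layout (hence the explicit zeroing of index, flags, and padding), then runs the same fixup pipeline as the modern path. Sketch around the matched lines; allocation, copy-in, and error handling are elided:

int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries)
{
        struct kvm_cpuid_entry *cpuid_entries;
        int r, i;

        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;
        /* ... allocate cpuid_entries, copy_from_user(entries) (elided) ... */

        for (i = 0; i < cpuid->nent; i++) {
                /* Widen the legacy entry; the *2 layout adds index/flags. */
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
                vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
                vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
                vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
                vcpu->arch.cpuid_entries[i].index = 0;
                vcpu->arch.cpuid_entries[i].flags = 0;
                vcpu->arch.cpuid_entries[i].padding[0] = 0;
                vcpu->arch.cpuid_entries[i].padding[1] = 0;
                vcpu->arch.cpuid_entries[i].padding[2] = 0;
        }
        vcpu->arch.cpuid_nent = cpuid->nent;
        cpuid_fix_nx_cap(vcpu);
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
        /* ... free cpuid_entries (elided) ... */
        return r;
}
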
203 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_cpuid2() argument
213 if (copy_from_user(&vcpu->arch.cpuid_entries, entries, in kvm_vcpu_ioctl_set_cpuid2()
216 vcpu->arch.cpuid_nent = cpuid->nent; in kvm_vcpu_ioctl_set_cpuid2()
217 kvm_apic_set_version(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
218 kvm_x86_ops->cpuid_update(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
219 r = kvm_update_cpuid(vcpu); in kvm_vcpu_ioctl_set_cpuid2()
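
The KVM_SET_CPUID2 path is simpler because userspace already supplies kvm_cpuid_entry2 records, so one bulk copy_from_user() replaces the per-field widening loop; the same refresh calls follow. Sketch:

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -EFAULT;
        if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
                           cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
out:
        return r;
}
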
224 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_cpuid2() argument
231 if (cpuid->nent < vcpu->arch.cpuid_nent) in kvm_vcpu_ioctl_get_cpuid2()
234 if (copy_to_user(entries, &vcpu->arch.cpuid_entries, in kvm_vcpu_ioctl_get_cpuid2()
235 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) in kvm_vcpu_ioctl_get_cpuid2()
240 cpuid->nent = vcpu->arch.cpuid_nent; in kvm_vcpu_ioctl_get_cpuid2()
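
kvm_vcpu_ioctl_get_cpuid2() is the read-back mirror: it rejects undersized buffers with E2BIG and, on the error path, writes the actual entry count back so userspace can size a retry. Sketch:

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries)
{
        int r;

        r = -E2BIG;
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
        if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;

out:
        cpuid->nent = vcpu->arch.cpuid_nent;
        return r;
}
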
729 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) in move_to_next_stateful_cpuid_entry() argument
731 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; in move_to_next_stateful_cpuid_entry()
732 int j, nent = vcpu->arch.cpuid_nent; in move_to_next_stateful_cpuid_entry()
737 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; in move_to_next_stateful_cpuid_entry()
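
Stateful CPUID leaves (historically leaf 2 on some Intel CPUs) return different data on consecutive reads; KVM models that with a READ_NEXT flag which this helper advances, wrapping around the table. Sketch under that flag scheme:

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
        int j, nent = vcpu->arch.cpuid_nent;

        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* Hand READ_NEXT to the following entry for the same leaf,
         * wrapping back to the current one if it is the only match. */
        for (j = i + 1; ; j = (j + 1) % nent) {
                struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
                if (ej->function == e->function) {
                        ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
                        return j;
                }
        }
}
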
761 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, in kvm_find_cpuid_entry() argument
767 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { in kvm_find_cpuid_entry()
770 e = &vcpu->arch.cpuid_entries[i]; in kvm_find_cpuid_entry()
773 move_to_next_stateful_cpuid_entry(vcpu, i); in kvm_find_cpuid_entry()
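
kvm_find_cpuid_entry() is the central lookup used throughout this file: a linear scan over the per-vcpu table, with a detour through the stateful helper when the matched leaf carries KVM_CPUID_FLAG_STATEFUL_FUNC. A linear scan suffices because the table is bounded by KVM_MAX_CPUID_ENTRIES. Sketch (is_matching_cpuid_entry() compares function and, where significant, index):

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *best = NULL;
        int i;

        for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
                struct kvm_cpuid_entry2 *e;

                e = &vcpu->arch.cpuid_entries[i];
                if (is_matching_cpuid_entry(e, function, index)) {
                        if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
                                move_to_next_stateful_cpuid_entry(vcpu, i);
                        best = e;
                        break;
                }
        }
        return best;
}
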
787 static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, in check_cpuid_limit() argument
792 maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); in check_cpuid_limit()
796 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0); in check_cpuid_limit()
800 return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); in check_cpuid_limit()
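
check_cpuid_limit() reproduces real-hardware fallback behavior: a query above the basic or extended maximum leaf returns the data of the highest in-range basic leaf rather than zeros. `function & 0x80000000` selects leaf 0 or leaf 0x80000000 as the relevant "max level" entry. Sketch:

static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
                                                  u32 function, u32 index)
{
        struct kvm_cpuid_entry2 *maxlevel;

        maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
        if (!maxlevel || maxlevel->eax >= function)
                return NULL;
        if (function & 0x80000000) {
                /* Out-of-range extended leaves fall back to the basic max. */
                maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
                if (!maxlevel)
                        return NULL;
        }
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
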
803 void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) in kvm_cpuid() argument
808 best = kvm_find_cpuid_entry(vcpu, function, index); in kvm_cpuid()
811 best = check_cpuid_limit(vcpu, function, index); in kvm_cpuid()
816 if (is_guest_mode(vcpu) && function == 0xa) in kvm_cpuid()
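
kvm_cpuid() resolves one query in-place through the register values: table lookup, then the out-of-range fallback, with leaf 0xA (architectural PMU) suppressed while running a nested (L2) guest. Sketch:

void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, function, index);
        if (!best)
                best = check_cpuid_limit(vcpu, function, index);

        /* The PMU leaf is not exposed to nested guests. */
        if (is_guest_mode(vcpu) && function == 0xa)
                best = NULL;

        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
                *ecx = best->ecx;
                *edx = best->edx;
        } else {
                *eax = *ebx = *ecx = *edx = 0;
        }
        trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
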
830 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) in kvm_emulate_cpuid() argument
834 function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_emulate_cpuid()
835 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_emulate_cpuid()
836 kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx); in kvm_emulate_cpuid()
837 kvm_register_write(vcpu, VCPU_REGS_RAX, eax); in kvm_emulate_cpuid()
838 kvm_register_write(vcpu, VCPU_REGS_RBX, ebx); in kvm_emulate_cpuid()
839 kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); in kvm_emulate_cpuid()
840 kvm_register_write(vcpu, VCPU_REGS_RDX, edx); in kvm_emulate_cpuid()
841 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_cpuid()
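
Finally, kvm_emulate_cpuid() is the VM-exit entry point, and the matched lines are essentially the whole function: read RAX/RCX, resolve through kvm_cpuid(), write all four result registers back, and skip past the guest's CPUID instruction. Assembled from the matches:

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        u32 function, eax, ebx, ecx, edx;

        function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
        kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
        kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
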