#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

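/*
 * CPUID management routines, implemented in cpuid.c: refreshing derived
 * vCPU state after the guest CPUID table changes, looking up guest CPUID
 * entries, servicing the CPUID-related ioctls, and emulating the CPUID
 * instruction for the guest.
 */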
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

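/*
 * Guest MAXPHYADDR handling: cpuid_query_maxphyaddr() derives the guest's
 * physical address width from its CPUID entries, while cpuid_maxphyaddr()
 * returns the value cached in vcpu->arch.maxphyaddr.
 */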
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

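/*
 * guest_cpuid_has_*(): report whether the guest's CPUID, as configured by
 * userspace, advertises the given feature bit (CPUID leaf 1, leaf 7, or
 * 0x80000001).  guest_cpuid_has_xsave() additionally requires XSAVE support
 * on the host CPU, since KVM cannot expose XSAVE to the guest without it.
 */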
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

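/*
 * True if the guest's CPUID leaf 0 vendor signature matches AMD's
 * "AuthenticAMD" (only the EBX portion of the signature is checked).
 */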
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}
#endif