#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

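/*
 * Return the guest's physical address width, in bits, as cached in
 * vcpu->arch.maxphyaddr; cpuid_query_maxphyaddr() above recomputes it
 * from the guest's CPUID entries.
 */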
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

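/*
 * The guest_cpuid_has_*() helpers below report whether a feature bit is
 * set in the vcpu's CPUID entries, i.e. whether the feature has been
 * advertised to the guest.
 */

/* XSAVE: CPUID.1:ECX; only meaningful when the host itself has XSAVE. */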
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

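/* MTRR (Memory Type Range Registers): CPUID.1:EDX */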
static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->edx & bit(X86_FEATURE_MTRR));
}

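/* IA32_TSC_ADJUST MSR support: CPUID.7.0:EBX */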
static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

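/* SMEP (Supervisor Mode Execution Prevention): CPUID.7.0:EBX */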
static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

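/* SMAP (Supervisor Mode Access Prevention): CPUID.7.0:EBX */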
static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

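/* FSGSBASE instructions (RDFSBASE/WRFSBASE etc.): CPUID.7.0:EBX */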
static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

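/* Long mode (64-bit) support: CPUID.0x80000001:EDX */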
static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

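/* OSVW (OS Visible Workaround, AMD): CPUID.0x80000001:ECX */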
static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

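/* PCID (Process-Context Identifiers): CPUID.1:ECX */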
static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

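/* x2APIC mode: CPUID.1:ECX */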
static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

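/*
 * The guest is AMD if leaf 0's EBX holds the leading dword ("Auth") of
 * the "AuthenticAMD" vendor string.
 */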
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

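/* 1GB pages: CPUID.0x80000001:EDX */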
static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

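/* RTM (Restricted Transactional Memory, part of TSX): CPUID.7.0:EBX */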
static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

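/* MPX (Memory Protection Extensions): CPUID.7.0:EBX */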
static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

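/* PCOMMIT (persistent commit): CPUID.7.0:EBX */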
static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

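/* RDTSCP instruction: CPUID.0x80000001:EDX */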
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through CPUID function 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

#endif /* ARCH_X86_KVM_CPUID_H */