/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/i387.h> /* For use_eager_fpu.  Ugh! */
#include <asm/fpu-internal.h> /* For use_eager_fpu.  Ugh! */
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"

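/*
 * Compute the XSAVE area size needed for the extended states in @xstate_bv,
 * querying CPUID leaf 0xD on the host.  In the compacted (XSAVES) format the
 * states are packed back to back; otherwise each state's fixed offset from
 * CPUID.(EAX=0xD,ECX=n).EBX is used.
 */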
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XSTATE_EXTEND_MASK;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_x86_ops->mpx_supported())
		xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

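/*
 * Refresh the bits of the cached CPUID entries that depend on current vCPU
 * state (OSXSAVE, APIC timer mode, XSAVE area sizes), recompute the guest's
 * supported XCR0 and MAXPHYADDR, and reject configurations that advertise a
 * virtual-address width other than 0 or 48 bits.
 */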
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);

	/*
	 * The existing code assumes virtual address is 48-bit in the canonical
	 * address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
		((best->eax & 0xff00) >> 8) != 0)
		return -EINVAL;

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

	kvm_pmu_cpuid_update(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

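/*
 * If the host does not have NX enabled in EFER, clear the NX bit from the
 * guest's 0x80000001 leaf as well.
 */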
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

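/*
 * Return the guest's physical-address width from CPUID leaf 0x80000008,
 * falling back to 36 bits when the leaf is not present.
 */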
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/* Legacy KVM_SET_CPUID: old userspace passes the old struct kvm_cpuid_entry
 * format to a newer kernel. */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

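/* Mask a guest CPUID word against the corresponding host capability word. */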
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

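/* Fill one entry by executing CPUID on the host for @function/@index. */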
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 1;		/* only one leaf currently */
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

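/*
 * Fill @entry (and any additional sub-leaf entries) for @function with the
 * features KVM can expose: start from the host's CPUID values and mask them
 * against the per-word feature lists below and the backend's capabilities.
 */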
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
		F(AVX512CD);

	/* cpuid 0xD.1.eax */
	const u32 kvm_supported_word10_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_supported_word9_x86_features;
			cpuid_mask(&entry->ebx, 9);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
		} else
			entry->ebx = 0;
		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
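	/* function 0xd has additional index: sub-leaf 1 plus one per supported
	 * extended state. */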
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_supported_word10_x86_features;
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 6: /* Thermal management */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to settle for checking only the emulated side. /me sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

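/*
 * Handle KVM_GET_SUPPORTED_CPUID / KVM_GET_EMULATED_CPUID: enumerate the
 * standard, extended, Centaur and KVM leaves (walking each range up to its
 * reported maximum leaf) and copy the resulting table back to userspace.
 */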
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
				     &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

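/*
 * For stateful leaves (function 2), pass the READ_NEXT marker from entry @i
 * to the next cached entry with the same function, wrapping around if needed.
 */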
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

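/*
 * Service a guest CPUID query from the cached entries: look up the requested
 * leaf/sub-leaf and fall back to check_cpuid_limit() for out-of-range leaves.
 */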
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best)
		best = check_cpuid_limit(vcpu, function, index);

	/*
	 * Perfmon not yet supported for L2 guest.
	 */
	if (is_guest_mode(vcpu) && function == 0xa)
		best = NULL;

	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

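/*
 * Emulate the CPUID instruction: read the leaf/sub-leaf from RAX/RCX,
 * resolve it via kvm_cpuid(), write back the result registers and advance RIP.
 */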
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, eax, ebx, ecx, edx;

	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);