cpuid_entries     651 arch/x86/include/asm/kvm_host.h 	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
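The hit above is the per-vCPU storage for the guest CPUID table: a fixed-size array of struct kvm_cpuid_entry2 embedded in struct kvm_vcpu_arch. A minimal sketch of the surrounding context, assuming the pre-5.11 layout in which the table is an embedded array paired with a cpuid_nent count (later kernels switched to a dynamically allocated pointer); fields other than cpuid_nent/cpuid_entries are elided:

/* Sketch of the relevant part of struct kvm_vcpu_arch
 * (arch/x86/include/asm/kvm_host.h), pre-5.11 era. */
struct kvm_vcpu_arch {
	/* ... */
	int cpuid_nent;		/* number of valid entries below */
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* ... */
};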
cpuid_entries     170 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
cpuid_entries     203 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry *cpuid_entries = NULL;
cpuid_entries     210 arch/x86/kvm/cpuid.c 		cpuid_entries =
cpuid_entries     213 arch/x86/kvm/cpuid.c 		if (!cpuid_entries)
cpuid_entries     216 arch/x86/kvm/cpuid.c 		if (copy_from_user(cpuid_entries, entries,
cpuid_entries     221 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
cpuid_entries     222 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
cpuid_entries     223 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
cpuid_entries     224 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
cpuid_entries     225 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
cpuid_entries     226 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].index = 0;
cpuid_entries     227 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].flags = 0;
cpuid_entries     228 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[0] = 0;
cpuid_entries     229 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[1] = 0;
cpuid_entries     230 arch/x86/kvm/cpuid.c 		vcpu->arch.cpuid_entries[i].padding[2] = 0;
cpuid_entries     239 arch/x86/kvm/cpuid.c 	vfree(cpuid_entries);
cpuid_entries     253 arch/x86/kvm/cpuid.c 	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
cpuid_entries     274 arch/x86/kvm/cpuid.c 	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
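The cpuid.c hits at lines 170-274 sit in the vcpu ioctl handlers: the legacy KVM_SET_CPUID path copies an array of struct kvm_cpuid_entry from userspace into a temporary buffer and widens each element into the vcpu's kvm_cpuid_entry2 table (lines 221-230), while KVM_SET_CPUID2 and KVM_GET_CPUID2 copy the kvm_cpuid_entry2 array directly (lines 253 and 274). A hedged userspace sketch of driving KVM_SET_CPUID2; the helper name set_vcpu_cpuid is illustrative, error handling is minimal, and the entries are assumed to come from KVM_GET_SUPPORTED_CPUID (see the sketch after the next group):

/* Userspace sketch: install a vCPU's CPUID table via KVM_SET_CPUID2. */
#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_vcpu_cpuid(int vcpu_fd, const struct kvm_cpuid_entry2 *ents, __u32 nent)
{
	struct kvm_cpuid2 *cpuid;
	int ret;

	/* struct kvm_cpuid2 ends in a flexible array of entries. */
	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
	if (!cpuid)
		return -1;
	cpuid->nent = nent;
	memcpy(cpuid->entries, ents, nent * sizeof(cpuid->entries[0]));

	ret = ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
	free(cpuid);
	return ret;
}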
cpuid_entries     869 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry2 *cpuid_entries;
cpuid_entries     888 arch/x86/kvm/cpuid.c 	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
cpuid_entries     890 arch/x86/kvm/cpuid.c 	if (!cpuid_entries)
cpuid_entries     900 arch/x86/kvm/cpuid.c 		r = do_cpuid_func(&cpuid_entries[nent], ent->func,
cpuid_entries     906 arch/x86/kvm/cpuid.c 		limit = cpuid_entries[nent - 1].eax;
cpuid_entries     908 arch/x86/kvm/cpuid.c 			r = do_cpuid_func(&cpuid_entries[nent], func,
cpuid_entries     916 arch/x86/kvm/cpuid.c 	if (copy_to_user(entries, cpuid_entries,
cpuid_entries     923 arch/x86/kvm/cpuid.c 	vfree(cpuid_entries);
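The hits at cpuid.c:869-923 are in kvm_dev_ioctl_get_cpuid(), which backs KVM_GET_SUPPORTED_CPUID (and KVM_GET_EMULATED_CPUID): it vzalloc()s a temporary cpuid_entries array, fills it with do_cpuid_func() for each base function and its sub-leaves up to the limit reported in EAX of the previous leaf (line 906), copies the result to userspace (line 916), and vfree()s the buffer (line 923). A hedged sketch of the matching userspace caller; the helper name get_supported_cpuid and the initial buffer size are illustrative, and the grow-on-E2BIG retry mirrors the common pattern rather than anything in the listing:

/* Userspace sketch: fetch the host-supported CPUID table from /dev/kvm. */
#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_cpuid2 *get_supported_cpuid(int kvm_fd)
{
	__u32 nent = 64;	/* initial guess; doubled on E2BIG */
	struct kvm_cpuid2 *cpuid;

	for (;;) {
		cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
		if (!cpuid)
			return NULL;
		cpuid->nent = nent;
		if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
			return cpuid;	/* cpuid->nent now holds the real count */
		free(cpuid);
		if (errno != E2BIG)
			return NULL;
		nent *= 2;		/* buffer too small: grow and retry */
	}
}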
cpuid_entries     930 arch/x86/kvm/cpuid.c 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
cpuid_entries     939 arch/x86/kvm/cpuid.c 		ej = &vcpu->arch.cpuid_entries[j];
cpuid_entries     971 arch/x86/kvm/cpuid.c 		e = &vcpu->arch.cpuid_entries[i];
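The hits at cpuid.c:930-971 are the in-kernel lookup side: kvm_find_cpuid_entry()-style helpers walk vcpu->arch.cpuid_entries[0..cpuid_nent) to locate the entry for the leaf the guest asked for, honoring the sub-leaf index only when the entry is flagged index-significant, and stepping to the next sub-leaf for stateful leaves (line 939). A minimal sketch of that lookup under those assumptions; the helper name find_cpuid_entry is illustrative and the real code also handles stateful iteration and out-of-range leaves:

/* Sketch of a kvm_find_cpuid_entry()-style walk over the per-vCPU table. */
static struct kvm_cpuid_entry2 *find_cpuid_entry(struct kvm_vcpu *vcpu,
						 u32 function, u32 index)
{
	int i;

	for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
		struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];

		if (e->function != function)
			continue;
		/* Only leaves flagged as index-significant compare the sub-leaf. */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;
	}
	return NULL;
}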
cpuid_entries    1790 arch/x86/kvm/hyperv.c 	struct kvm_cpuid_entry2 cpuid_entries[] = {
cpuid_entries    1799 arch/x86/kvm/hyperv.c 	int i, nent = ARRAY_SIZE(cpuid_entries);
cpuid_entries    1815 arch/x86/kvm/hyperv.c 		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
cpuid_entries    1909 arch/x86/kvm/hyperv.c 	if (copy_to_user(entries, cpuid_entries,
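The hyperv.c hits (lines 1790-1909) are in kvm_get_hv_cpuid(), which serves KVM_GET_SUPPORTED_HV_CPUID: it declares a small on-stack cpuid_entries[] array of struct kvm_cpuid_entry2 (line 1790), iterates over it filling each entry's registers according to its Hyper-V synthetic CPUID leaf (line 1815), and copies the result to userspace (line 1909), mirroring the regular KVM_GET_SUPPORTED_CPUID path. A hedged sketch of the shape of that table, using well-known HYPERV_CPUID_* leaf numbers; the exact set of leaves varies by kernel version and is not recoverable from the listing:

/* Sketch of the on-stack table in kvm_get_hv_cpuid(); only .function is
 * pre-set here, the per-leaf register values are filled in a switch on
 * ent->function further down. */
struct kvm_cpuid_entry2 cpuid_entries[] = {
	{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
	{ .function = HYPERV_CPUID_INTERFACE },
	{ .function = HYPERV_CPUID_VERSION },
	{ .function = HYPERV_CPUID_FEATURES },
	{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
	{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
	{ .function = HYPERV_CPUID_NESTED_FEATURES },
};
int i, nent = ARRAY_SIZE(cpuid_entries);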