efer              556 arch/x86/include/asm/kvm_host.h 	u64 efer;
efer             1050 arch/x86/include/asm/kvm_host.h 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
efer             1361 arch/x86/include/asm/kvm_host.h bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
efer               48 arch/x86/include/asm/realmode.h 	u64 efer;
efer               41 arch/x86/include/asm/suspend_64.h 	unsigned long efer;
efer              175 arch/x86/include/asm/svm.h 	u64 efer;
efer              112 arch/x86/include/asm/virtext.h 	uint64_t efer;
efer              115 arch/x86/include/asm/virtext.h 	rdmsrl(MSR_EFER, efer);
efer              116 arch/x86/include/asm/virtext.h 	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
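
The three virtext.h hits are the body of cpu_svm_disable(), which turns SVM off on the current CPU by clearing EFER.SVME. Reassembled from the fragments (the MSR_VM_HSAVE_PA write is restored from the surrounding kernel source, not from this listing):

    /* Disable SVM on the current CPU. */
    static inline void cpu_svm_disable(void)
    {
        uint64_t efer;

        wrmsrl(MSR_VM_HSAVE_PA, 0);    /* detach the host save area first */
        rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer & ~EFER_SVME);
    }
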
efer              155 arch/x86/include/uapi/asm/kvm.h 	__u64 efer;
efer              157 arch/x86/kvm/cpuid.c 	unsigned long long efer = 0;
efer              159 arch/x86/kvm/cpuid.c 	rdmsrl_safe(MSR_EFER, &efer);
efer              160 arch/x86/kvm/cpuid.c 	return efer & EFER_NX;
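
Taken together, the cpuid.c hits form a complete helper that probes the host EFER and reports whether NX is enabled. rdmsrl_safe() tolerates CPUs that lack the MSR, leaving efer at its zero initializer, so the check degrades to "no NX". Reassembled (the function name and return type are assumed from the kernel tree this listing matches):

    static int is_efer_nx(void)
    {
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);    /* may fail; efer stays 0 */
        return efer & EFER_NX;
    }
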
efer              810 arch/x86/kvm/emulate.c 			u64 efer = 0;
efer              812 arch/x86/kvm/emulate.c 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer              813 arch/x86/kvm/emulate.c 			if (efer & EFER_LMA)
efer             1597 arch/x86/kvm/emulate.c 		u64 efer = 0;
efer             1599 arch/x86/kvm/emulate.c 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             1600 arch/x86/kvm/emulate.c 		if (!(efer & EFER_LMA))
efer             1748 arch/x86/kvm/emulate.c 			u64 efer = 0;
efer             1750 arch/x86/kvm/emulate.c 			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             1751 arch/x86/kvm/emulate.c 			if (efer & EFER_LMA)
efer             2613 arch/x86/kvm/emulate.c 	unsigned long cr0, cr4, efer;
efer             2665 arch/x86/kvm/emulate.c 		efer = 0;
efer             2666 arch/x86/kvm/emulate.c 		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
efer             2791 arch/x86/kvm/emulate.c 	u64 efer = 0;
efer             2801 arch/x86/kvm/emulate.c 	ops->get_msr(ctxt, MSR_EFER, &efer);
efer             2804 arch/x86/kvm/emulate.c 	if (!(efer & EFER_SCE))
efer             2812 arch/x86/kvm/emulate.c 	if (efer & EFER_LMA) {
efer             2820 arch/x86/kvm/emulate.c 	if (efer & EFER_LMA) {
efer             2851 arch/x86/kvm/emulate.c 	u64 efer = 0;
efer             2853 arch/x86/kvm/emulate.c 	ops->get_msr(ctxt, MSR_EFER, &efer);
efer             2862 arch/x86/kvm/emulate.c 	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
efer             2879 arch/x86/kvm/emulate.c 	if (efer & EFER_LMA) {
efer             2888 arch/x86/kvm/emulate.c 	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
efer             2891 arch/x86/kvm/emulate.c 	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
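
Lines 2791-2820 are em_syscall(), which injects #UD unless EFER.SCE is set and uses EFER.LMA to pick the 64-bit or legacy flavor; lines 2851-2891 are em_sysenter(), where EFER.LMA selects the operand width for the new RIP and RSP. A condensed sketch of the em_syscall() gate (the emulate_ud() call and the STAR/LSTAR reads are paraphrased from the kernel source, not quoted from this listing, and the compat-mode MSR_CSTAR case is omitted):

    u64 efer = 0, msr_data;

    ops->get_msr(ctxt, MSR_EFER, &efer);
    if (!(efer & EFER_SCE))
        return emulate_ud(ctxt);    /* SYSCALL disabled by EFER.SCE=0 */

    if (efer & EFER_LMA)
        ops->get_msr(ctxt, MSR_LSTAR, &msr_data);    /* 64-bit entry RIP */
    else
        ops->get_msr(ctxt, MSR_STAR, &msr_data);     /* legacy: EIP in STAR[31:0] */
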
efer             4234 arch/x86/kvm/emulate.c 	u64 efer = 0;
efer             4258 arch/x86/kvm/emulate.c 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             4260 arch/x86/kvm/emulate.c 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
efer             4269 arch/x86/kvm/emulate.c 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             4270 arch/x86/kvm/emulate.c 		if (efer & EFER_LMA) {
efer             4292 arch/x86/kvm/emulate.c 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             4294 arch/x86/kvm/emulate.c 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
efer             4352 arch/x86/kvm/emulate.c 	u64 efer = 0;
efer             4354 arch/x86/kvm/emulate.c 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
efer             4356 arch/x86/kvm/emulate.c 	if (!(efer & EFER_SVME))
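
The hits at 4352-4356 are check_svme(), the pre-decode permission check run before SVM instructions are emulated. Reconstructed (the #UD return is restored from the kernel source):

    static int check_svme(struct x86_emulate_ctxt *ctxt)
    {
        u64 efer = 0;

        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

        if (!(efer & EFER_SVME))
            return emulate_ud(ctxt);    /* VMRUN and friends #UD when SVME=0 */

        return X86EMUL_CONTINUE;
    }
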
efer              606 arch/x86/kvm/mmu.c 	return vcpu->arch.efer & EFER_NX;
efer              734 arch/x86/kvm/svm.c static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
efer              736 arch/x86/kvm/svm.c 	vcpu->arch.efer = efer;
efer              740 arch/x86/kvm/svm.c 		efer |= EFER_NX;
efer              742 arch/x86/kvm/svm.c 		if (!(efer & EFER_LMA))
efer              743 arch/x86/kvm/svm.c 			efer &= ~EFER_LME;
efer              746 arch/x86/kvm/svm.c 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
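
Lines 734-746 are nearly all of svm_set_efer(). The gaps (737-739, 744-745) hold the !npt_enabled guard and its closing brace in the kernel source; with those restored (an assumption about this tree's exact shape), the function reads:

    static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
    {
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
            /* Shadow paging assumes NX to be available. */
            efer |= EFER_NX;

            if (!(efer & EFER_LMA))
                efer &= ~EFER_LME;
        }

        /* The guest never sees SVME clear while it runs under SVM. */
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
    }
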
efer              913 arch/x86/kvm/svm.c 	uint64_t efer;
efer              917 arch/x86/kvm/svm.c 	rdmsrl(MSR_EFER, efer);
efer              918 arch/x86/kvm/svm.c 	if (efer & EFER_SVME)
efer              939 arch/x86/kvm/svm.c 	wrmsrl(MSR_EFER, efer | EFER_SVME);
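
Lines 913-939 sit in svm_hardware_enable(): if EFER.SVME is already set, the CPU is treated as busy (some other hypervisor owns SVM) and enabling fails; otherwise the final wrmsrl turns SVME on.
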
efer             2604 arch/x86/kvm/svm.c 	if (vcpu->arch.efer & EFER_LME) {
efer             2606 arch/x86/kvm/svm.c 			vcpu->arch.efer |= EFER_LMA;
efer             2607 arch/x86/kvm/svm.c 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
efer             2611 arch/x86/kvm/svm.c 			vcpu->arch.efer &= ~EFER_LMA;
efer             2612 arch/x86/kvm/svm.c 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
efer             3051 arch/x86/kvm/svm.c 	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
efer             3387 arch/x86/kvm/svm.c 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
efer             3458 arch/x86/kvm/svm.c 	svm_set_efer(&svm->vcpu, hsave->save.efer);
efer             3562 arch/x86/kvm/svm.c 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
efer             3703 arch/x86/kvm/svm.c 	hsave->save.efer   = svm->vcpu.arch.efer;
efer             4296 arch/x86/kvm/svm.c 	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
efer             4938 arch/x86/kvm/svm.c 		save->cpl, save->efer);
efer             1967 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
efer             1969 arch/x86/kvm/vmx/nested.c 		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
efer             2403 arch/x86/kvm/vmx/nested.c 	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
efer             2405 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, vcpu->arch.efer);
efer             2679 arch/x86/kvm/vmx/nested.c 	ia32e = !!(vcpu->arch.efer & EFER_LMA);
efer             3725 arch/x86/kvm/vmx/nested.c 		vmcs12->guest_ia32_efer = vcpu->arch.efer;
efer             3803 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer = vmcs12->host_ia32_efer;
efer             3805 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
efer             3807 arch/x86/kvm/vmx/nested.c 		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
efer             3808 arch/x86/kvm/vmx/nested.c 	vmx_set_efer(vcpu, vcpu->arch.efer);
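
Lines 1967/1969 are the two computed arms of nested_vmx_calc_efer(), which derives the guest EFER for a nested VM-entry: honor vmcs12's EFER when the entry controls load it, otherwise force LMA/LME to agree with the IA-32e-mode entry control. With the conditions filled in from the kernel source:

    static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
    {
        if (vmx->nested.nested_run_pending &&
            (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
            return vmcs12->guest_ia32_efer;
        else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
            return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
        else
            return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
    }
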
efer              946 arch/x86/kvm/vmx/vmx.c 	u64 guest_efer = vmx->vcpu.arch.efer;
efer              970 arch/x86/kvm/vmx/vmx.c 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
efer             1653 arch/x86/kvm/vmx/vmx.c 	if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
efer             2765 arch/x86/kvm/vmx/vmx.c void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
efer             2773 arch/x86/kvm/vmx/vmx.c 	vcpu->arch.efer = efer;
efer             2774 arch/x86/kvm/vmx/vmx.c 	if (efer & EFER_LMA) {
efer             2776 arch/x86/kvm/vmx/vmx.c 		msr->data = efer;
efer             2780 arch/x86/kvm/vmx/vmx.c 		msr->data = efer & ~EFER_LME;
efer             2801 arch/x86/kvm/vmx/vmx.c 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
efer             2807 arch/x86/kvm/vmx/vmx.c 	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
efer             2927 arch/x86/kvm/vmx/vmx.c 	if (vcpu->arch.efer & EFER_LME) {
efer             5686 arch/x86/kvm/vmx/vmx.c 	u64 efer;
efer             5699 arch/x86/kvm/vmx/vmx.c 	efer = vmcs_read64(GUEST_IA32_EFER);
efer             5712 arch/x86/kvm/vmx/vmx.c 	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
efer             5739 arch/x86/kvm/vmx/vmx.c 		       efer, vmcs_read64(GUEST_IA32_PAT));
efer              321 arch/x86/kvm/vmx/vmx.h void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
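
The vmx.c hits at 2765-2787 are vmx_set_efer(). Absent the load-EFER entry control, VMX has no architectural EFER swap, so the value is routed into the shared MSR slot that is reloaded on return to userspace, and the IA-32e-mode VM-entry control is toggled to track EFER.LMA. Reconstructed (find_msr_entry(), the entry-control helpers, and setup_msrs() are restored from the kernel source):

    void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
    {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

        if (!msr)
            return;

        vcpu->arch.efer = efer;
        if (efer & EFER_LMA) {
            vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
            msr->data = efer;
        } else {
            vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
            msr->data = efer & ~EFER_LME;    /* drop LME from the stored value */
        }

        setup_msrs(vmx);
    }
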
efer              783 arch/x86/kvm/x86.c 		if ((vcpu->arch.efer & EFER_LME)) {
efer             1431 arch/x86/kvm/x86.c static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
efer             1433 arch/x86/kvm/x86.c 	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
efer             1436 arch/x86/kvm/x86.c 	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
efer             1439 arch/x86/kvm/x86.c 	if (efer & (EFER_LME | EFER_LMA) &&
efer             1443 arch/x86/kvm/x86.c 	if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
efer             1449 arch/x86/kvm/x86.c bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
efer             1451 arch/x86/kvm/x86.c 	if (efer & efer_reserved_bits)
efer             1454 arch/x86/kvm/x86.c 	return __kvm_valid_efer(vcpu, efer);
efer             1460 arch/x86/kvm/x86.c 	u64 old_efer = vcpu->arch.efer;
efer             1461 arch/x86/kvm/x86.c 	u64 efer = msr_info->data;
efer             1463 arch/x86/kvm/x86.c 	if (efer & efer_reserved_bits)
efer             1467 arch/x86/kvm/x86.c 		if (!__kvm_valid_efer(vcpu, efer))
efer             1471 arch/x86/kvm/x86.c 		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
efer             1475 arch/x86/kvm/x86.c 	efer &= ~EFER_LMA;
efer             1476 arch/x86/kvm/x86.c 	efer |= vcpu->arch.efer & EFER_LMA;
efer             1478 arch/x86/kvm/x86.c 	kvm_x86_ops->set_efer(vcpu, efer);
efer             1481 arch/x86/kvm/x86.c 	if ((efer ^ old_efer) & EFER_NX)
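
Lines 1460-1481 are the WRMSR path, set_efer(): reserved bits always fault; the CPUID-derived checks in __kvm_valid_efer() are skipped for host-initiated writes; LME may not change while paging is on; and LMA is not guest-writable, so the current value is spliced back in before the vendor hook runs. Reassembled (the host_initiated test, the return statements, and the final MMU reset are filled in from the kernel source):

    static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
    {
        u64 old_efer = vcpu->arch.efer;
        u64 efer = msr_info->data;

        if (efer & efer_reserved_bits)
            return 1;

        if (!msr_info->host_initiated) {
            if (!__kvm_valid_efer(vcpu, efer))
                return 1;

            if (is_paging(vcpu) &&
                (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
                return 1;
        }

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;    /* LMA is managed by KVM, not the guest */

        kvm_x86_ops->set_efer(vcpu, efer);

        /* NX flipped: page-table reserved-bit/NX semantics change */
        if ((efer ^ old_efer) & EFER_NX)
            kvm_mmu_reset_context(vcpu);

        return 0;
    }
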
efer             3087 arch/x86/kvm/x86.c 		msr_info->data = vcpu->arch.efer;
efer             7829 arch/x86/kvm/x86.c 	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
efer             8748 arch/x86/kvm/x86.c 	sregs->efer = vcpu->arch.efer;
efer             8843 arch/x86/kvm/x86.c 	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
efer             8850 arch/x86/kvm/x86.c 		    || !(sregs->efer & EFER_LMA))
efer             8857 arch/x86/kvm/x86.c 		if (sregs->efer & EFER_LMA || sregs->cs.l)
efer             8895 arch/x86/kvm/x86.c 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
efer             8896 arch/x86/kvm/x86.c 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
efer               87 arch/x86/kvm/x86.h 	return vcpu->arch.efer & EFER_LMA;
efer              106 arch/x86/kvm/x86.h 	return (vcpu->arch.efer & EFER_LMA) &&
efer              116 arch/x86/power/cpu.c 	rdmsrl(MSR_EFER, ctxt->efer);
efer              207 arch/x86/power/cpu.c 	wrmsrl(MSR_EFER, ctxt->efer);
efer               50 arch/x86/realmode/init.c 	u64 efer;
efer               97 arch/x86/realmode/init.c 	rdmsrl(MSR_EFER, efer);
efer               98 arch/x86/realmode/init.c 	trampoline_header->efer = efer & ~EFER_LMA;
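
The power/cpu.c hits save and restore EFER verbatim across suspend, while the realmode/init.c hits mask LMA out of the copy stored in the trampoline header: per the comment in the kernel source, some AMD processors raise #GP(0) on a WRMSR that tries to set EFER.LMA, and the CPU sets LMA itself once the trampoline re-enables paging in long mode.
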
efer             1110 drivers/parport/parport_pc.c static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
efer             1156 drivers/parport/parport_pc.c 		       efer, key, devid, devrev, oldid, type);
efer             1159 drivers/parport/parport_pc.c 		show_parconfig_winbond(efer, key);
efer             1162 drivers/parport/parport_pc.c static void decode_smsc(int efer, int key, int devid, int devrev)
efer             1189 drivers/parport/parport_pc.c 		       efer, key, devid, devrev, type);
efer             1192 drivers/parport/parport_pc.c 		func(efer, key);
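
Note that the parport_pc.c hits are unrelated to the x86 MSR: there, efer is the I/O port of a Super I/O chip's Extended Function Enable Register, which these helpers probe to identify Winbond and SMSC parallel-port controllers.
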
efer               52 include/xen/interface/hvm/hvm_vcpu.h     uint64_t efer;
efer              106 include/xen/interface/hvm/hvm_vcpu.h     uint64_t efer;
efer              155 tools/arch/x86/include/uapi/asm/kvm.h 	__u64 efer;
efer              220 tools/testing/selftests/kvm/lib/x86_64/processor.c 		sregs->cr8, sregs->efer, sregs->apic_base);
efer              627 tools/testing/selftests/kvm/lib/x86_64/processor.c 		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
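
In the selftest library the guest is created directly in 64-bit mode, so the sregs setup forces LME, LMA and NX on before pushing the state back into the vCPU. A sketch of the surrounding pattern (the get/set helper names are assumed from the selftest library):

    struct kvm_sregs sregs;

    vcpu_sregs_get(vm, vcpuid, &sregs);
    sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);    /* long mode + NX */
    vcpu_sregs_set(vm, vcpuid, &sregs);
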