get_cpl  1040 arch/x86/include/asm/kvm_host.h  int (*get_cpl)(struct kvm_vcpu *vcpu);
get_cpl  1608 arch/x86/kvm/hyperv.c            if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
get_cpl   160 arch/x86/kvm/mmu.h               int cpl = kvm_x86_ops->get_cpl(vcpu);
get_cpl  7270 arch/x86/kvm/svm.c               .get_cpl = svm_get_cpl,
get_cpl  7815 arch/x86/kvm/vmx/vmx.c           .get_cpl = vmx_get_cpl,
get_cpl   641 arch/x86/kvm/x86.c               if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
get_cpl   904 arch/x86/kvm/x86.c               if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
get_cpl  3571 arch/x86/kvm/x86.c               vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
get_cpl  5359 arch/x86/kvm/x86.c               u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
get_cpl  5366 arch/x86/kvm/x86.c               u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
get_cpl  5374 arch/x86/kvm/x86.c               u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
get_cpl  5423 arch/x86/kvm/x86.c               u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
get_cpl  5448 arch/x86/kvm/x86.c               u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
get_cpl  5469 arch/x86/kvm/x86.c               if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
get_cpl  5522 arch/x86/kvm/x86.c               if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
get_cpl  5584 arch/x86/kvm/x86.c               u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
get_cpl  6090 arch/x86/kvm/x86.c               return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
get_cpl  6424 arch/x86/kvm/x86.c               if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
get_cpl  7166 arch/x86/kvm/x86.c               user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
get_cpl  7475 arch/x86/kvm/x86.c               if (kvm_x86_ops->get_cpl(vcpu) != 0) {
get_cpl 10164 arch/x86/kvm/x86.c               kvm_x86_ops->get_cpl(vcpu) == 0))
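For context: get_cpl is the vendor callback in kvm_x86_ops (declared at kvm_host.h:1040, wired up by SVM and VMX at svm.c:7270 and vmx.c:7815) that returns the guest's current privilege level, 0 through 3. The call sites above use it in two recurring ways: deriving a page-fault access mask (CPL 3 means a user-mode access, so PFERR_USER_MASK is set) and gating privileged operations on CPL 0. The standalone sketch below illustrates both idioms outside the kernel; kvm_x86_ops_sketch, fake_get_cpl, and fake_cpl are simplified stand-ins invented here for illustration, and the PFERR_USER_MASK value assumes bit 2 as defined in the kernel headers.

/*
 * Minimal standalone sketch of the get_cpl pattern in the listing above.
 * Not kernel code: the types are simplified stand-ins for the real
 * kvm_vcpu and kvm_x86_ops structures.
 */
#include <stdio.h>

#define PFERR_USER_MASK (1U << 2)	/* assumed to match the kernel's bit 2 */

struct kvm_vcpu {
	int fake_cpl;			/* stand-in for real vCPU segment state */
};

struct kvm_x86_ops_sketch {
	/* mirrors the hook declared at kvm_host.h:1040 */
	int (*get_cpl)(struct kvm_vcpu *vcpu);
};

/* Stand-in for svm_get_cpl()/vmx_get_cpl(): reads CPL from vendor state. */
static int fake_get_cpl(struct kvm_vcpu *vcpu)
{
	return vcpu->fake_cpl;
}

/* Dispatch table, filled in like ".get_cpl = svm_get_cpl" in svm.c/vmx.c. */
static struct kvm_x86_ops_sketch sketch_ops = {
	.get_cpl = fake_get_cpl,
};
static struct kvm_x86_ops_sketch *kvm_x86_ops = &sketch_ops;

int main(void)
{
	struct kvm_vcpu vcpu = { .fake_cpl = 3 };

	/* Idiom 1 (x86.c:5359 etc.): CPL 3 means a user-mode access. */
	unsigned int access =
		(kvm_x86_ops->get_cpl(&vcpu) == 3) ? PFERR_USER_MASK : 0;
	printf("access mask = %#x\n", access);

	/* Idiom 2 (hyperv.c:1608, x86.c:7475): privileged ops require CPL 0. */
	if (kvm_x86_ops->get_cpl(&vcpu) != 0)
		printf("CPL %d: privileged request would be rejected\n",
		       kvm_x86_ops->get_cpl(&vcpu));

	return 0;
}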