svm 50 arch/arm/mm/ioremap.c struct static_vm *svm; svm 53 arch/arm/mm/ioremap.c list_for_each_entry(svm, &static_vmlist, list) { svm 54 arch/arm/mm/ioremap.c vm = &svm->vm; svm 64 arch/arm/mm/ioremap.c return svm; svm 72 arch/arm/mm/ioremap.c struct static_vm *svm; svm 75 arch/arm/mm/ioremap.c list_for_each_entry(svm, &static_vmlist, list) { svm 76 arch/arm/mm/ioremap.c vm = &svm->vm; svm 83 arch/arm/mm/ioremap.c return svm; svm 89 arch/arm/mm/ioremap.c void __init add_static_vm_early(struct static_vm *svm) svm 95 arch/arm/mm/ioremap.c vm = &svm->vm; svm 105 arch/arm/mm/ioremap.c list_add_tail(&svm->list, &curr_svm->list); svm 290 arch/arm/mm/ioremap.c struct static_vm *svm; svm 292 arch/arm/mm/ioremap.c svm = find_static_vm_paddr(paddr, size, mtype); svm 293 arch/arm/mm/ioremap.c if (svm) { svm 294 arch/arm/mm/ioremap.c addr = (unsigned long)svm->vm.addr; svm 295 arch/arm/mm/ioremap.c addr += paddr - svm->vm.phys_addr; svm 433 arch/arm/mm/ioremap.c struct static_vm *svm; svm 436 arch/arm/mm/ioremap.c svm = find_static_vm_vaddr(addr); svm 437 arch/arm/mm/ioremap.c if (svm) svm 81 arch/arm/mm/mm.h extern __init void add_static_vm_early(struct static_vm *svm); svm 994 arch/arm/mm/mmu.c struct static_vm *svm; svm 999 arch/arm/mm/mmu.c svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm)); svm 1000 arch/arm/mm/mmu.c if (!svm) svm 1002 arch/arm/mm/mmu.c __func__, sizeof(*svm) * nr, __alignof__(*svm)); svm 1007 arch/arm/mm/mmu.c vm = &svm->vm; svm 1014 arch/arm/mm/mmu.c add_static_vm_early(svm++); svm 1022 arch/arm/mm/mmu.c struct static_vm *svm; svm 1024 arch/arm/mm/mmu.c svm = memblock_alloc(sizeof(*svm), __alignof__(*svm)); svm 1025 arch/arm/mm/mmu.c if (!svm) svm 1027 arch/arm/mm/mmu.c __func__, sizeof(*svm), __alignof__(*svm)); svm 1029 arch/arm/mm/mmu.c vm = &svm->vm; svm 1034 arch/arm/mm/mmu.c add_static_vm_early(svm); svm 1059 arch/arm/mm/mmu.c struct static_vm *svm; svm 1064 arch/arm/mm/mmu.c list_for_each_entry(svm, &static_vmlist, list) { svm 1065 arch/arm/mm/mmu.c vm = &svm->vm; svm 1105 arch/arm/mm/mmu.c struct static_vm *svm; svm 1107 arch/arm/mm/mmu.c svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); svm 1108 arch/arm/mm/mmu.c if (svm) svm 724 arch/powerpc/kernel/sysfs.c static DEVICE_ATTR(svm, 0444, show_svm, NULL); svm 388 arch/x86/kvm/svm.c static void svm_complete_interrupts(struct vcpu_svm *svm); svm 390 arch/x86/kvm/svm.c static int nested_svm_exit_handled(struct vcpu_svm *svm); svm 391 arch/x86/kvm/svm.c static int nested_svm_intercept(struct vcpu_svm *svm); svm 392 arch/x86/kvm/svm.c static int nested_svm_vmexit(struct vcpu_svm *svm); svm 393 arch/x86/kvm/svm.c static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm 484 arch/x86/kvm/svm.c static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data) svm 486 arch/x86/kvm/svm.c svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK; svm 487 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_AVIC); svm 492 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 493 arch/x86/kvm/svm.c u64 *entry = svm->avic_physical_id_cache; svm 501 arch/x86/kvm/svm.c static void recalc_intercepts(struct vcpu_svm *svm) svm 506 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTERCEPTS); svm 508 arch/x86/kvm/svm.c if (!is_guest_mode(&svm->vcpu)) svm 511 arch/x86/kvm/svm.c c = &svm->vmcb->control; svm 512 arch/x86/kvm/svm.c h = &svm->nested.hsave->control; svm 513 arch/x86/kvm/svm.c g = &svm->nested; svm 521 arch/x86/kvm/svm.c static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) svm 
523 arch/x86/kvm/svm.c if (is_guest_mode(&svm->vcpu)) svm 524 arch/x86/kvm/svm.c return svm->nested.hsave; svm 526 arch/x86/kvm/svm.c return svm->vmcb; svm 529 arch/x86/kvm/svm.c static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) svm 531 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 535 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 538 arch/x86/kvm/svm.c static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) svm 540 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 544 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 547 arch/x86/kvm/svm.c static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) svm 549 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 554 arch/x86/kvm/svm.c static inline void set_dr_intercepts(struct vcpu_svm *svm) svm 556 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 575 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 578 arch/x86/kvm/svm.c static inline void clr_dr_intercepts(struct vcpu_svm *svm) svm 580 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 584 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 587 arch/x86/kvm/svm.c static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) svm 589 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 593 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 596 arch/x86/kvm/svm.c static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) svm 598 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 602 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 605 arch/x86/kvm/svm.c static inline void set_intercept(struct vcpu_svm *svm, int bit) svm 607 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 611 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 614 arch/x86/kvm/svm.c static inline void clr_intercept(struct vcpu_svm *svm, int bit) svm 616 arch/x86/kvm/svm.c struct vmcb *vmcb = get_host_vmcb(svm); svm 620 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 623 arch/x86/kvm/svm.c static inline bool vgif_enabled(struct vcpu_svm *svm) svm 625 arch/x86/kvm/svm.c return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK); svm 628 arch/x86/kvm/svm.c static inline void enable_gif(struct vcpu_svm *svm) svm 630 arch/x86/kvm/svm.c if (vgif_enabled(svm)) svm 631 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl |= V_GIF_MASK; svm 633 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_GIF_MASK; svm 636 arch/x86/kvm/svm.c static inline void disable_gif(struct vcpu_svm *svm) svm 638 arch/x86/kvm/svm.c if (vgif_enabled(svm)) svm 639 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl &= ~V_GIF_MASK; svm 641 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_GIF_MASK; svm 644 arch/x86/kvm/svm.c static inline bool gif_set(struct vcpu_svm *svm) svm 646 arch/x86/kvm/svm.c if (vgif_enabled(svm)) svm 647 arch/x86/kvm/svm.c return !!(svm->vmcb->control.int_ctl & V_GIF_MASK); svm 649 arch/x86/kvm/svm.c return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); svm 758 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 761 arch/x86/kvm/svm.c if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) svm 768 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 771 arch/x86/kvm/svm.c svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; svm 773 arch/x86/kvm/svm.c svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; svm 779 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 781 arch/x86/kvm/svm.c if (nrips && svm->vmcb->control.next_rip != 0) { svm 783 arch/x86/kvm/svm.c svm->next_rip = 
svm->vmcb->control.next_rip; svm 786 arch/x86/kvm/svm.c if (!svm->next_rip) { svm 790 arch/x86/kvm/svm.c if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) svm 792 arch/x86/kvm/svm.c __func__, kvm_rip_read(vcpu), svm->next_rip); svm 793 arch/x86/kvm/svm.c kvm_rip_write(vcpu, svm->next_rip); svm 802 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 813 arch/x86/kvm/svm.c nested_svm_check_exception(svm, nr, has_error_code, error_code)) svm 816 arch/x86/kvm/svm.c kvm_deliver_exception_payload(&svm->vcpu); svm 819 arch/x86/kvm/svm.c unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); svm 828 arch/x86/kvm/svm.c (void)skip_emulated_instruction(&svm->vcpu); svm 829 arch/x86/kvm/svm.c rip = kvm_rip_read(&svm->vcpu); svm 830 arch/x86/kvm/svm.c svm->int3_rip = rip + svm->vmcb->save.cs.base; svm 831 arch/x86/kvm/svm.c svm->int3_injected = rip - old_rip; svm 834 arch/x86/kvm/svm.c svm->vmcb->control.event_inj = nr svm 838 arch/x86/kvm/svm.c svm->vmcb->control.event_inj_err = error_code; svm 1143 arch/x86/kvm/svm.c static void svm_enable_lbrv(struct vcpu_svm *svm) svm 1145 arch/x86/kvm/svm.c u32 *msrpm = svm->msrpm; svm 1147 arch/x86/kvm/svm.c svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; svm 1154 arch/x86/kvm/svm.c static void svm_disable_lbrv(struct vcpu_svm *svm) svm 1156 arch/x86/kvm/svm.c u32 *msrpm = svm->msrpm; svm 1158 arch/x86/kvm/svm.c svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; svm 1165 arch/x86/kvm/svm.c static void disable_nmi_singlestep(struct vcpu_svm *svm) svm 1167 arch/x86/kvm/svm.c svm->nmi_singlestep = false; svm 1169 arch/x86/kvm/svm.c if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { svm 1171 arch/x86/kvm/svm.c if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) svm 1172 arch/x86/kvm/svm.c svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; svm 1173 arch/x86/kvm/svm.c if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) svm 1174 arch/x86/kvm/svm.c svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; svm 1266 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 1267 arch/x86/kvm/svm.c struct vmcb_control_area *control = &svm->vmcb->control; svm 1276 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTERCEPTS); svm 1284 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 1285 arch/x86/kvm/svm.c struct vmcb_control_area *control = &svm->vmcb->control; svm 1294 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTERCEPTS); svm 1492 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 1495 arch/x86/kvm/svm.c return svm->nested.hsave->control.tsc_offset; svm 1502 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 1507 arch/x86/kvm/svm.c g_tsc_offset = svm->vmcb->control.tsc_offset - svm 1508 arch/x86/kvm/svm.c svm->nested.hsave->control.tsc_offset; svm 1509 arch/x86/kvm/svm.c svm->nested.hsave->control.tsc_offset = offset; svm 1513 arch/x86/kvm/svm.c svm->vmcb->control.tsc_offset - g_tsc_offset, svm 1516 arch/x86/kvm/svm.c svm->vmcb->control.tsc_offset = offset + g_tsc_offset; svm 1518 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTERCEPTS); svm 1519 arch/x86/kvm/svm.c return svm->vmcb->control.tsc_offset; svm 1522 arch/x86/kvm/svm.c static void avic_init_vmcb(struct vcpu_svm *svm) svm 1524 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb; svm 1525 arch/x86/kvm/svm.c struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); svm 1526 arch/x86/kvm/svm.c phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); svm 1537 arch/x86/kvm/svm.c static void init_vmcb(struct vcpu_svm *svm) svm 1539 arch/x86/kvm/svm.c struct 
vmcb_control_area *control = &svm->vmcb->control; svm 1540 arch/x86/kvm/svm.c struct vmcb_save_area *save = &svm->vmcb->save; svm 1542 arch/x86/kvm/svm.c svm->vcpu.arch.hflags = 0; svm 1544 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR0_READ); svm 1545 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR3_READ); svm 1546 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR4_READ); svm 1547 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR0_WRITE); svm 1548 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR3_WRITE); svm 1549 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR4_WRITE); svm 1550 arch/x86/kvm/svm.c if (!kvm_vcpu_apicv_active(&svm->vcpu)) svm 1551 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR8_WRITE); svm 1553 arch/x86/kvm/svm.c set_dr_intercepts(svm); svm 1555 arch/x86/kvm/svm.c set_exception_intercept(svm, PF_VECTOR); svm 1556 arch/x86/kvm/svm.c set_exception_intercept(svm, UD_VECTOR); svm 1557 arch/x86/kvm/svm.c set_exception_intercept(svm, MC_VECTOR); svm 1558 arch/x86/kvm/svm.c set_exception_intercept(svm, AC_VECTOR); svm 1559 arch/x86/kvm/svm.c set_exception_intercept(svm, DB_VECTOR); svm 1567 arch/x86/kvm/svm.c set_exception_intercept(svm, GP_VECTOR); svm 1569 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_INTR); svm 1570 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_NMI); svm 1571 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_SMI); svm 1572 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_SELECTIVE_CR0); svm 1573 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_RDPMC); svm 1574 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_CPUID); svm 1575 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_INVD); svm 1576 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_INVLPG); svm 1577 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_INVLPGA); svm 1578 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_IOIO_PROT); svm 1579 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_MSR_PROT); svm 1580 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_TASK_SWITCH); svm 1581 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_SHUTDOWN); svm 1582 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_VMRUN); svm 1583 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_VMMCALL); svm 1584 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_VMLOAD); svm 1585 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_VMSAVE); svm 1586 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_STGI); svm 1587 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_CLGI); svm 1588 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_SKINIT); svm 1589 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_WBINVD); svm 1590 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_XSETBV); svm 1591 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_RDPRU); svm 1592 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_RSM); svm 1594 arch/x86/kvm/svm.c if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { svm 1595 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_MONITOR); svm 1596 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_MWAIT); svm 1599 arch/x86/kvm/svm.c if (!kvm_hlt_in_guest(svm->vcpu.kvm)) svm 1600 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_HLT); svm 1603 arch/x86/kvm/svm.c control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); svm 1625 arch/x86/kvm/svm.c svm_set_efer(&svm->vcpu, 0); svm 1627 arch/x86/kvm/svm.c kvm_set_rflags(&svm->vcpu, 2); svm 1629 arch/x86/kvm/svm.c svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; svm 1635 arch/x86/kvm/svm.c svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); svm 1636 arch/x86/kvm/svm.c 
kvm_mmu_reset_context(&svm->vcpu); svm 1644 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_INVLPG); svm 1645 arch/x86/kvm/svm.c clr_exception_intercept(svm, PF_VECTOR); svm 1646 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR3_READ); svm 1647 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); svm 1648 arch/x86/kvm/svm.c save->g_pat = svm->vcpu.arch.pat; svm 1652 arch/x86/kvm/svm.c svm->asid_generation = 0; svm 1654 arch/x86/kvm/svm.c svm->nested.vmcb = 0; svm 1655 arch/x86/kvm/svm.c svm->vcpu.arch.hflags = 0; svm 1661 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_PAUSE); svm 1663 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_PAUSE); svm 1666 arch/x86/kvm/svm.c if (kvm_vcpu_apicv_active(&svm->vcpu)) svm 1667 arch/x86/kvm/svm.c avic_init_vmcb(svm); svm 1674 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_VMLOAD); svm 1675 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_VMSAVE); svm 1676 arch/x86/kvm/svm.c svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; svm 1680 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_STGI); svm 1681 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_CLGI); svm 1682 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; svm 1685 arch/x86/kvm/svm.c if (sev_guest(svm->vcpu.kvm)) { svm 1686 arch/x86/kvm/svm.c svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; svm 1687 arch/x86/kvm/svm.c clr_exception_intercept(svm, UD_VECTOR); svm 1690 arch/x86/kvm/svm.c mark_all_dirty(svm->vmcb); svm 1692 arch/x86/kvm/svm.c enable_gif(svm); svm 1745 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 1754 arch/x86/kvm/svm.c if (!svm->vcpu.arch.apic->regs) svm 1757 arch/x86/kvm/svm.c svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); svm 1764 arch/x86/kvm/svm.c new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & svm 1769 arch/x86/kvm/svm.c svm->avic_physical_id_cache = entry; svm 2052 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2061 arch/x86/kvm/svm.c spin_lock_irqsave(&svm->ir_list_lock, flags); svm 2063 arch/x86/kvm/svm.c if (list_empty(&svm->ir_list)) svm 2066 arch/x86/kvm/svm.c list_for_each_entry(ir, &svm->ir_list, node) { svm 2072 arch/x86/kvm/svm.c spin_unlock_irqrestore(&svm->ir_list_lock, flags); svm 2081 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2093 arch/x86/kvm/svm.c entry = READ_ONCE(*(svm->avic_physical_id_cache)); svm 2100 arch/x86/kvm/svm.c if (svm->avic_is_running) svm 2103 arch/x86/kvm/svm.c WRITE_ONCE(*(svm->avic_physical_id_cache), entry); svm 2105 arch/x86/kvm/svm.c svm->avic_is_running); svm 2111 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2116 arch/x86/kvm/svm.c entry = READ_ONCE(*(svm->avic_physical_id_cache)); svm 2121 arch/x86/kvm/svm.c WRITE_ONCE(*(svm->avic_physical_id_cache), entry); svm 2129 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2131 arch/x86/kvm/svm.c svm->avic_is_running = is_run; svm 2140 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2145 arch/x86/kvm/svm.c svm->spec_ctrl = 0; svm 2146 arch/x86/kvm/svm.c svm->virt_spec_ctrl = 0; svm 2149 arch/x86/kvm/svm.c svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | svm 2151 arch/x86/kvm/svm.c if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) svm 2152 arch/x86/kvm/svm.c svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; svm 2154 arch/x86/kvm/svm.c init_vmcb(svm); svm 2160 arch/x86/kvm/svm.c avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); svm 2163 arch/x86/kvm/svm.c static int avic_init_vcpu(struct vcpu_svm *svm) svm 2167 arch/x86/kvm/svm.c if 
(!kvm_vcpu_apicv_active(&svm->vcpu)) svm 2170 arch/x86/kvm/svm.c ret = avic_init_backing_page(&svm->vcpu); svm 2174 arch/x86/kvm/svm.c INIT_LIST_HEAD(&svm->ir_list); svm 2175 arch/x86/kvm/svm.c spin_lock_init(&svm->ir_list_lock); svm 2176 arch/x86/kvm/svm.c svm->dfr_reg = APIC_DFR_FLAT; svm 2183 arch/x86/kvm/svm.c struct vcpu_svm *svm; svm 2193 arch/x86/kvm/svm.c svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); svm 2194 arch/x86/kvm/svm.c if (!svm) { svm 2199 arch/x86/kvm/svm.c svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, svm 2201 arch/x86/kvm/svm.c if (!svm->vcpu.arch.user_fpu) { svm 2207 arch/x86/kvm/svm.c svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, svm 2209 arch/x86/kvm/svm.c if (!svm->vcpu.arch.guest_fpu) { svm 2215 arch/x86/kvm/svm.c err = kvm_vcpu_init(&svm->vcpu, kvm, id); svm 2236 arch/x86/kvm/svm.c err = avic_init_vcpu(svm); svm 2243 arch/x86/kvm/svm.c svm->avic_is_running = true; svm 2245 arch/x86/kvm/svm.c svm->nested.hsave = page_address(hsave_page); svm 2247 arch/x86/kvm/svm.c svm->msrpm = page_address(msrpm_pages); svm 2248 arch/x86/kvm/svm.c svm_vcpu_init_msrpm(svm->msrpm); svm 2250 arch/x86/kvm/svm.c svm->nested.msrpm = page_address(nested_msrpm_pages); svm 2251 arch/x86/kvm/svm.c svm_vcpu_init_msrpm(svm->nested.msrpm); svm 2253 arch/x86/kvm/svm.c svm->vmcb = page_address(page); svm 2254 arch/x86/kvm/svm.c clear_page(svm->vmcb); svm 2255 arch/x86/kvm/svm.c svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT); svm 2256 arch/x86/kvm/svm.c svm->asid_generation = 0; svm 2257 arch/x86/kvm/svm.c init_vmcb(svm); svm 2259 arch/x86/kvm/svm.c svm_init_osvw(&svm->vcpu); svm 2261 arch/x86/kvm/svm.c return &svm->vcpu; svm 2272 arch/x86/kvm/svm.c kvm_vcpu_uninit(&svm->vcpu); svm 2274 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); svm 2276 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); svm 2278 arch/x86/kvm/svm.c kmem_cache_free(kvm_vcpu_cache, svm); svm 2293 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2300 arch/x86/kvm/svm.c svm_clear_current_vmcb(svm->vmcb); svm 2302 arch/x86/kvm/svm.c __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); svm 2303 arch/x86/kvm/svm.c __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); svm 2304 arch/x86/kvm/svm.c __free_page(virt_to_page(svm->nested.hsave)); svm 2305 arch/x86/kvm/svm.c __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); svm 2307 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); svm 2308 arch/x86/kvm/svm.c kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); svm 2309 arch/x86/kvm/svm.c kmem_cache_free(kvm_vcpu_cache, svm); svm 2314 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2319 arch/x86/kvm/svm.c svm->asid_generation = 0; svm 2320 arch/x86/kvm/svm.c mark_all_dirty(svm->vmcb); svm 2326 arch/x86/kvm/svm.c savesegment(fs, svm->host.fs); svm 2327 arch/x86/kvm/svm.c savesegment(gs, svm->host.gs); svm 2328 arch/x86/kvm/svm.c svm->host.ldt = kvm_read_ldt(); svm 2331 arch/x86/kvm/svm.c rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); svm 2342 arch/x86/kvm/svm.c wrmsrl(MSR_TSC_AUX, svm->tsc_aux); svm 2344 arch/x86/kvm/svm.c if (sd->current_vmcb != svm->vmcb) { svm 2345 arch/x86/kvm/svm.c sd->current_vmcb = svm->vmcb; svm 2353 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2359 arch/x86/kvm/svm.c kvm_load_ldt(svm->host.ldt); svm 2361 arch/x86/kvm/svm.c loadsegment(fs, svm->host.fs); svm 2363 arch/x86/kvm/svm.c 
load_gs_index(svm->host.gs); svm 2366 arch/x86/kvm/svm.c loadsegment(gs, svm->host.gs); svm 2370 arch/x86/kvm/svm.c wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); svm 2385 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2386 arch/x86/kvm/svm.c unsigned long rflags = svm->vmcb->save.rflags; svm 2388 arch/x86/kvm/svm.c if (svm->nmi_singlestep) { svm 2390 arch/x86/kvm/svm.c if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) svm 2392 arch/x86/kvm/svm.c if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) svm 2423 arch/x86/kvm/svm.c static void svm_set_vintr(struct vcpu_svm *svm) svm 2425 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_VINTR); svm 2428 arch/x86/kvm/svm.c static void svm_clear_vintr(struct vcpu_svm *svm) svm 2430 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_VINTR); svm 2536 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2538 arch/x86/kvm/svm.c dt->size = svm->vmcb->save.idtr.limit; svm 2539 arch/x86/kvm/svm.c dt->address = svm->vmcb->save.idtr.base; svm 2544 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2546 arch/x86/kvm/svm.c svm->vmcb->save.idtr.limit = dt->size; svm 2547 arch/x86/kvm/svm.c svm->vmcb->save.idtr.base = dt->address ; svm 2548 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_DT); svm 2553 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2555 arch/x86/kvm/svm.c dt->size = svm->vmcb->save.gdtr.limit; svm 2556 arch/x86/kvm/svm.c dt->address = svm->vmcb->save.gdtr.base; svm 2561 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2563 arch/x86/kvm/svm.c svm->vmcb->save.gdtr.limit = dt->size; svm 2564 arch/x86/kvm/svm.c svm->vmcb->save.gdtr.base = dt->address ; svm 2565 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_DT); svm 2580 arch/x86/kvm/svm.c static void update_cr0_intercept(struct vcpu_svm *svm) svm 2582 arch/x86/kvm/svm.c ulong gcr0 = svm->vcpu.arch.cr0; svm 2583 arch/x86/kvm/svm.c u64 *hcr0 = &svm->vmcb->save.cr0; svm 2588 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_CR); svm 2591 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR0_READ); svm 2592 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); svm 2594 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR0_READ); svm 2595 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR0_WRITE); svm 2601 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2607 arch/x86/kvm/svm.c svm->vmcb->save.efer |= EFER_LMA | EFER_LME; svm 2612 arch/x86/kvm/svm.c svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); svm 2628 arch/x86/kvm/svm.c svm->vmcb->save.cr0 = cr0; svm 2629 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_CR); svm 2630 arch/x86/kvm/svm.c update_cr0_intercept(svm); svm 2656 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2679 arch/x86/kvm/svm.c svm->vmcb->save.cpl = (var->dpl & 3); svm 2681 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_SEG); svm 2686 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2688 arch/x86/kvm/svm.c clr_exception_intercept(svm, BP_VECTOR); svm 2692 arch/x86/kvm/svm.c set_exception_intercept(svm, BP_VECTOR); svm 2697 arch/x86/kvm/svm.c static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) svm 2702 arch/x86/kvm/svm.c svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; svm 2705 arch/x86/kvm/svm.c svm->asid_generation = sd->asid_generation; svm 2706 arch/x86/kvm/svm.c svm->vmcb->control.asid = sd->next_asid++; svm 2708 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_ASID); svm 2718 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2720 
arch/x86/kvm/svm.c svm->vmcb->save.dr6 = value; svm 2721 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_DR); svm 2726 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2733 arch/x86/kvm/svm.c vcpu->arch.dr7 = svm->vmcb->save.dr7; svm 2736 arch/x86/kvm/svm.c set_dr_intercepts(svm); svm 2741 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2743 arch/x86/kvm/svm.c svm->vmcb->save.dr7 = value; svm 2744 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_DR); svm 2747 arch/x86/kvm/svm.c static int pf_interception(struct vcpu_svm *svm) svm 2749 arch/x86/kvm/svm.c u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); svm 2750 arch/x86/kvm/svm.c u64 error_code = svm->vmcb->control.exit_info_1; svm 2752 arch/x86/kvm/svm.c return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, svm 2754 arch/x86/kvm/svm.c svm->vmcb->control.insn_bytes : NULL, svm 2755 arch/x86/kvm/svm.c svm->vmcb->control.insn_len); svm 2758 arch/x86/kvm/svm.c static int npf_interception(struct vcpu_svm *svm) svm 2760 arch/x86/kvm/svm.c u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); svm 2761 arch/x86/kvm/svm.c u64 error_code = svm->vmcb->control.exit_info_1; svm 2764 arch/x86/kvm/svm.c return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, svm 2766 arch/x86/kvm/svm.c svm->vmcb->control.insn_bytes : NULL, svm 2767 arch/x86/kvm/svm.c svm->vmcb->control.insn_len); svm 2770 arch/x86/kvm/svm.c static int db_interception(struct vcpu_svm *svm) svm 2772 arch/x86/kvm/svm.c struct kvm_run *kvm_run = svm->vcpu.run; svm 2773 arch/x86/kvm/svm.c struct kvm_vcpu *vcpu = &svm->vcpu; svm 2775 arch/x86/kvm/svm.c if (!(svm->vcpu.guest_debug & svm 2777 arch/x86/kvm/svm.c !svm->nmi_singlestep) { svm 2778 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, DB_VECTOR); svm 2782 arch/x86/kvm/svm.c if (svm->nmi_singlestep) { svm 2783 arch/x86/kvm/svm.c disable_nmi_singlestep(svm); svm 2788 arch/x86/kvm/svm.c if (svm->vcpu.guest_debug & svm 2792 arch/x86/kvm/svm.c svm->vmcb->save.cs.base + svm->vmcb->save.rip; svm 2800 arch/x86/kvm/svm.c static int bp_interception(struct vcpu_svm *svm) svm 2802 arch/x86/kvm/svm.c struct kvm_run *kvm_run = svm->vcpu.run; svm 2805 arch/x86/kvm/svm.c kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; svm 2810 arch/x86/kvm/svm.c static int ud_interception(struct vcpu_svm *svm) svm 2812 arch/x86/kvm/svm.c return handle_ud(&svm->vcpu); svm 2815 arch/x86/kvm/svm.c static int ac_interception(struct vcpu_svm *svm) svm 2817 arch/x86/kvm/svm.c kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); svm 2821 arch/x86/kvm/svm.c static int gp_interception(struct vcpu_svm *svm) svm 2823 arch/x86/kvm/svm.c struct kvm_vcpu *vcpu = &svm->vcpu; svm 2824 arch/x86/kvm/svm.c u32 error_code = svm->vmcb->control.exit_info_1; svm 2878 arch/x86/kvm/svm.c static void svm_handle_mce(struct vcpu_svm *svm) svm 2887 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); svm 2903 arch/x86/kvm/svm.c static int mc_interception(struct vcpu_svm *svm) svm 2908 arch/x86/kvm/svm.c static int shutdown_interception(struct vcpu_svm *svm) svm 2910 arch/x86/kvm/svm.c struct kvm_run *kvm_run = svm->vcpu.run; svm 2916 arch/x86/kvm/svm.c clear_page(svm->vmcb); svm 2917 arch/x86/kvm/svm.c init_vmcb(svm); svm 2923 arch/x86/kvm/svm.c static int io_interception(struct vcpu_svm *svm) svm 2925 arch/x86/kvm/svm.c struct kvm_vcpu *vcpu = &svm->vcpu; svm 2926 arch/x86/kvm/svm.c u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? 
*/ svm 2930 arch/x86/kvm/svm.c ++svm->vcpu.stat.io_exits; svm 2938 arch/x86/kvm/svm.c svm->next_rip = svm->vmcb->control.exit_info_2; svm 2940 arch/x86/kvm/svm.c return kvm_fast_pio(&svm->vcpu, size, port, in); svm 2943 arch/x86/kvm/svm.c static int nmi_interception(struct vcpu_svm *svm) svm 2948 arch/x86/kvm/svm.c static int intr_interception(struct vcpu_svm *svm) svm 2950 arch/x86/kvm/svm.c ++svm->vcpu.stat.irq_exits; svm 2954 arch/x86/kvm/svm.c static int nop_on_interception(struct vcpu_svm *svm) svm 2959 arch/x86/kvm/svm.c static int halt_interception(struct vcpu_svm *svm) svm 2961 arch/x86/kvm/svm.c return kvm_emulate_halt(&svm->vcpu); svm 2964 arch/x86/kvm/svm.c static int vmmcall_interception(struct vcpu_svm *svm) svm 2966 arch/x86/kvm/svm.c return kvm_emulate_hypercall(&svm->vcpu); svm 2971 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2973 arch/x86/kvm/svm.c return svm->nested.nested_cr3; svm 2978 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2979 arch/x86/kvm/svm.c u64 cr3 = svm->nested.nested_cr3; svm 2993 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 2995 arch/x86/kvm/svm.c svm->vmcb->control.nested_cr3 = __sme_set(root); svm 2996 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_NPT); svm 3002 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 3004 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { svm 3009 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_NPF; svm 3010 arch/x86/kvm/svm.c svm->vmcb->control.exit_code_hi = 0; svm 3011 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 = (1ULL << 32); svm 3012 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = fault->address; svm 3015 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; svm 3016 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 |= fault->error_code; svm 3022 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) svm 3023 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 &= ~1; svm 3025 arch/x86/kvm/svm.c nested_svm_vmexit(svm); svm 3049 arch/x86/kvm/svm.c static int nested_svm_check_permissions(struct vcpu_svm *svm) svm 3051 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.efer & EFER_SVME) || svm 3052 arch/x86/kvm/svm.c !is_paging(&svm->vcpu)) { svm 3053 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, UD_VECTOR); svm 3057 arch/x86/kvm/svm.c if (svm->vmcb->save.cpl) { svm 3058 arch/x86/kvm/svm.c kvm_inject_gp(&svm->vcpu, 0); svm 3065 arch/x86/kvm/svm.c static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, svm 3070 arch/x86/kvm/svm.c if (!is_guest_mode(&svm->vcpu)) svm 3073 arch/x86/kvm/svm.c vmexit = nested_svm_intercept(svm); svm 3077 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; svm 3078 arch/x86/kvm/svm.c svm->vmcb->control.exit_code_hi = 0; svm 3079 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 = error_code; svm 3085 arch/x86/kvm/svm.c if (svm->vcpu.arch.exception.nested_apf) svm 3086 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; svm 3087 arch/x86/kvm/svm.c else if (svm->vcpu.arch.exception.has_payload) svm 3088 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; svm 3090 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; svm 3092 arch/x86/kvm/svm.c svm->nested.exit_required = true; svm 3097 arch/x86/kvm/svm.c static inline bool nested_svm_intr(struct vcpu_svm *svm) svm 3099 arch/x86/kvm/svm.c if (!is_guest_mode(&svm->vcpu)) svm 3102 arch/x86/kvm/svm.c if 
(!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) svm 3105 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) svm 3113 arch/x86/kvm/svm.c if (svm->nested.exit_required) svm 3116 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_INTR; svm 3117 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 = 0; svm 3118 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = 0; svm 3120 arch/x86/kvm/svm.c if (svm->nested.intercept & 1ULL) { svm 3127 arch/x86/kvm/svm.c svm->nested.exit_required = true; svm 3128 arch/x86/kvm/svm.c trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); svm 3136 arch/x86/kvm/svm.c static inline bool nested_svm_nmi(struct vcpu_svm *svm) svm 3138 arch/x86/kvm/svm.c if (!is_guest_mode(&svm->vcpu)) svm 3141 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) svm 3144 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_NMI; svm 3145 arch/x86/kvm/svm.c svm->nested.exit_required = true; svm 3150 arch/x86/kvm/svm.c static int nested_svm_intercept_ioio(struct vcpu_svm *svm) svm 3157 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) svm 3160 arch/x86/kvm/svm.c port = svm->vmcb->control.exit_info_1 >> 16; svm 3161 arch/x86/kvm/svm.c size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> svm 3163 arch/x86/kvm/svm.c gpa = svm->nested.vmcb_iopm + (port / 8); svm 3169 arch/x86/kvm/svm.c if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) svm 3175 arch/x86/kvm/svm.c static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) svm 3180 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) svm 3183 arch/x86/kvm/svm.c msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; svm 3185 arch/x86/kvm/svm.c write = svm->vmcb->control.exit_info_1 & 1; svm 3194 arch/x86/kvm/svm.c if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) svm 3201 arch/x86/kvm/svm.c static int nested_svm_intercept_db(struct vcpu_svm *svm) svm 3206 arch/x86/kvm/svm.c if (!svm->nmi_singlestep) svm 3210 arch/x86/kvm/svm.c if (kvm_get_dr(&svm->vcpu, 6, &dr6)) svm 3216 arch/x86/kvm/svm.c if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) { svm 3217 arch/x86/kvm/svm.c disable_nmi_singlestep(svm); svm 3225 arch/x86/kvm/svm.c static int nested_svm_exit_special(struct vcpu_svm *svm) svm 3227 arch/x86/kvm/svm.c u32 exit_code = svm->vmcb->control.exit_code; svm 3241 arch/x86/kvm/svm.c if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) svm 3254 arch/x86/kvm/svm.c static int nested_svm_intercept(struct vcpu_svm *svm) svm 3256 arch/x86/kvm/svm.c u32 exit_code = svm->vmcb->control.exit_code; svm 3261 arch/x86/kvm/svm.c vmexit = nested_svm_exit_handled_msr(svm); svm 3264 arch/x86/kvm/svm.c vmexit = nested_svm_intercept_ioio(svm); svm 3268 arch/x86/kvm/svm.c if (svm->nested.intercept_cr & bit) svm 3274 arch/x86/kvm/svm.c if (svm->nested.intercept_dr & bit) svm 3280 arch/x86/kvm/svm.c if (svm->nested.intercept_exceptions & excp_bits) { svm 3282 arch/x86/kvm/svm.c vmexit = nested_svm_intercept_db(svm); svm 3288 arch/x86/kvm/svm.c svm->vcpu.arch.exception.nested_apf != 0) svm 3298 arch/x86/kvm/svm.c if (svm->nested.intercept & exit_bits) svm 3306 arch/x86/kvm/svm.c static int nested_svm_exit_handled(struct vcpu_svm *svm) svm 3310 arch/x86/kvm/svm.c vmexit = nested_svm_intercept(svm); svm 3313 arch/x86/kvm/svm.c nested_svm_vmexit(svm); svm 3350 arch/x86/kvm/svm.c static int nested_svm_vmexit(struct vcpu_svm *svm) svm 3354 arch/x86/kvm/svm.c struct vmcb *hsave = svm->nested.hsave; svm 3355 arch/x86/kvm/svm.c struct vmcb 
*vmcb = svm->vmcb; svm 3365 arch/x86/kvm/svm.c rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map); svm 3368 arch/x86/kvm/svm.c kvm_inject_gp(&svm->vcpu, 0); svm 3375 arch/x86/kvm/svm.c leave_guest_mode(&svm->vcpu); svm 3376 arch/x86/kvm/svm.c svm->nested.vmcb = 0; svm 3379 arch/x86/kvm/svm.c disable_gif(svm); svm 3387 arch/x86/kvm/svm.c nested_vmcb->save.efer = svm->vcpu.arch.efer; svm 3388 arch/x86/kvm/svm.c nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); svm 3389 arch/x86/kvm/svm.c nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); svm 3391 arch/x86/kvm/svm.c nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; svm 3392 arch/x86/kvm/svm.c nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); svm 3410 arch/x86/kvm/svm.c if (svm->nrips_enabled) svm 3433 arch/x86/kvm/svm.c svm->vmcb->control.pause_filter_count; svm 3435 arch/x86/kvm/svm.c svm->vmcb->control.pause_filter_thresh; svm 3438 arch/x86/kvm/svm.c if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) svm 3444 arch/x86/kvm/svm.c svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; svm 3445 arch/x86/kvm/svm.c kvm_clear_exception_queue(&svm->vcpu); svm 3446 arch/x86/kvm/svm.c kvm_clear_interrupt_queue(&svm->vcpu); svm 3448 arch/x86/kvm/svm.c svm->nested.nested_cr3 = 0; svm 3451 arch/x86/kvm/svm.c svm->vmcb->save.es = hsave->save.es; svm 3452 arch/x86/kvm/svm.c svm->vmcb->save.cs = hsave->save.cs; svm 3453 arch/x86/kvm/svm.c svm->vmcb->save.ss = hsave->save.ss; svm 3454 arch/x86/kvm/svm.c svm->vmcb->save.ds = hsave->save.ds; svm 3455 arch/x86/kvm/svm.c svm->vmcb->save.gdtr = hsave->save.gdtr; svm 3456 arch/x86/kvm/svm.c svm->vmcb->save.idtr = hsave->save.idtr; svm 3457 arch/x86/kvm/svm.c kvm_set_rflags(&svm->vcpu, hsave->save.rflags); svm 3458 arch/x86/kvm/svm.c svm_set_efer(&svm->vcpu, hsave->save.efer); svm 3459 arch/x86/kvm/svm.c svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); svm 3460 arch/x86/kvm/svm.c svm_set_cr4(&svm->vcpu, hsave->save.cr4); svm 3462 arch/x86/kvm/svm.c svm->vmcb->save.cr3 = hsave->save.cr3; svm 3463 arch/x86/kvm/svm.c svm->vcpu.arch.cr3 = hsave->save.cr3; svm 3465 arch/x86/kvm/svm.c (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); svm 3467 arch/x86/kvm/svm.c kvm_rax_write(&svm->vcpu, hsave->save.rax); svm 3468 arch/x86/kvm/svm.c kvm_rsp_write(&svm->vcpu, hsave->save.rsp); svm 3469 arch/x86/kvm/svm.c kvm_rip_write(&svm->vcpu, hsave->save.rip); svm 3470 arch/x86/kvm/svm.c svm->vmcb->save.dr7 = 0; svm 3471 arch/x86/kvm/svm.c svm->vmcb->save.cpl = 0; svm 3472 arch/x86/kvm/svm.c svm->vmcb->control.exit_int_info = 0; svm 3474 arch/x86/kvm/svm.c mark_all_dirty(svm->vmcb); svm 3476 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); svm 3478 arch/x86/kvm/svm.c nested_svm_uninit_mmu_context(&svm->vcpu); svm 3479 arch/x86/kvm/svm.c kvm_mmu_reset_context(&svm->vcpu); svm 3480 arch/x86/kvm/svm.c kvm_mmu_load(&svm->vcpu); svm 3486 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false; svm 3487 arch/x86/kvm/svm.c kvm_clear_exception_queue(&svm->vcpu); svm 3488 arch/x86/kvm/svm.c kvm_clear_interrupt_queue(&svm->vcpu); svm 3493 arch/x86/kvm/svm.c static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) svm 3502 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) svm 3513 arch/x86/kvm/svm.c offset = svm->nested.vmcb_msrpm + (p * 4); svm 3515 arch/x86/kvm/svm.c if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) svm 3518 arch/x86/kvm/svm.c svm->nested.msrpm[p] = svm->msrpm[p] | value; svm 3521 arch/x86/kvm/svm.c svm->vmcb->control.msrpm_base_pa = 
__sme_set(__pa(svm->nested.msrpm)); svm 3541 arch/x86/kvm/svm.c static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, svm 3544 arch/x86/kvm/svm.c if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) svm 3545 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_HIF_MASK; svm 3547 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_HIF_MASK; svm 3550 arch/x86/kvm/svm.c svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; svm 3551 arch/x86/kvm/svm.c nested_svm_init_mmu_context(&svm->vcpu); svm 3555 arch/x86/kvm/svm.c svm->vmcb->save.es = nested_vmcb->save.es; svm 3556 arch/x86/kvm/svm.c svm->vmcb->save.cs = nested_vmcb->save.cs; svm 3557 arch/x86/kvm/svm.c svm->vmcb->save.ss = nested_vmcb->save.ss; svm 3558 arch/x86/kvm/svm.c svm->vmcb->save.ds = nested_vmcb->save.ds; svm 3559 arch/x86/kvm/svm.c svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; svm 3560 arch/x86/kvm/svm.c svm->vmcb->save.idtr = nested_vmcb->save.idtr; svm 3561 arch/x86/kvm/svm.c kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); svm 3562 arch/x86/kvm/svm.c svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); svm 3563 arch/x86/kvm/svm.c svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); svm 3564 arch/x86/kvm/svm.c svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); svm 3566 arch/x86/kvm/svm.c svm->vmcb->save.cr3 = nested_vmcb->save.cr3; svm 3567 arch/x86/kvm/svm.c svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; svm 3569 arch/x86/kvm/svm.c (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); svm 3572 arch/x86/kvm/svm.c kvm_mmu_reset_context(&svm->vcpu); svm 3574 arch/x86/kvm/svm.c svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; svm 3575 arch/x86/kvm/svm.c kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax); svm 3576 arch/x86/kvm/svm.c kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp); svm 3577 arch/x86/kvm/svm.c kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip); svm 3580 arch/x86/kvm/svm.c svm->vmcb->save.rax = nested_vmcb->save.rax; svm 3581 arch/x86/kvm/svm.c svm->vmcb->save.rsp = nested_vmcb->save.rsp; svm 3582 arch/x86/kvm/svm.c svm->vmcb->save.rip = nested_vmcb->save.rip; svm 3583 arch/x86/kvm/svm.c svm->vmcb->save.dr7 = nested_vmcb->save.dr7; svm 3584 arch/x86/kvm/svm.c svm->vmcb->save.dr6 = nested_vmcb->save.dr6; svm 3585 arch/x86/kvm/svm.c svm->vmcb->save.cpl = nested_vmcb->save.cpl; svm 3587 arch/x86/kvm/svm.c svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; svm 3588 arch/x86/kvm/svm.c svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; svm 3591 arch/x86/kvm/svm.c svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; svm 3592 arch/x86/kvm/svm.c svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; svm 3593 arch/x86/kvm/svm.c svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; svm 3594 arch/x86/kvm/svm.c svm->nested.intercept = nested_vmcb->control.intercept; svm 3596 arch/x86/kvm/svm.c svm_flush_tlb(&svm->vcpu, true); svm 3597 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; svm 3599 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_VINTR_MASK; svm 3601 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; svm 3603 arch/x86/kvm/svm.c if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { svm 3605 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR8_READ); svm 3606 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); svm 3610 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_VMMCALL); svm 3612 arch/x86/kvm/svm.c svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; svm 
3613 arch/x86/kvm/svm.c svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; svm 3615 arch/x86/kvm/svm.c svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; svm 3616 arch/x86/kvm/svm.c svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm 3617 arch/x86/kvm/svm.c svm->vmcb->control.int_state = nested_vmcb->control.int_state; svm 3618 arch/x86/kvm/svm.c svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm 3619 arch/x86/kvm/svm.c svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; svm 3621 arch/x86/kvm/svm.c svm->vmcb->control.pause_filter_count = svm 3623 arch/x86/kvm/svm.c svm->vmcb->control.pause_filter_thresh = svm 3626 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, map, true); svm 3629 arch/x86/kvm/svm.c enter_guest_mode(&svm->vcpu); svm 3635 arch/x86/kvm/svm.c recalc_intercepts(svm); svm 3637 arch/x86/kvm/svm.c svm->nested.vmcb = vmcb_gpa; svm 3639 arch/x86/kvm/svm.c enable_gif(svm); svm 3641 arch/x86/kvm/svm.c mark_all_dirty(svm->vmcb); svm 3644 arch/x86/kvm/svm.c static int nested_svm_vmrun(struct vcpu_svm *svm) svm 3648 arch/x86/kvm/svm.c struct vmcb *hsave = svm->nested.hsave; svm 3649 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb; svm 3653 arch/x86/kvm/svm.c vmcb_gpa = svm->vmcb->save.rax; svm 3655 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map); svm 3657 arch/x86/kvm/svm.c kvm_inject_gp(&svm->vcpu, 0); svm 3660 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&svm->vcpu); svm 3663 arch/x86/kvm/svm.c ret = kvm_skip_emulated_instruction(&svm->vcpu); svm 3673 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); svm 3678 arch/x86/kvm/svm.c trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, svm 3690 arch/x86/kvm/svm.c kvm_clear_exception_queue(&svm->vcpu); svm 3691 arch/x86/kvm/svm.c kvm_clear_interrupt_queue(&svm->vcpu); svm 3703 arch/x86/kvm/svm.c hsave->save.efer = svm->vcpu.arch.efer; svm 3704 arch/x86/kvm/svm.c hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); svm 3705 arch/x86/kvm/svm.c hsave->save.cr4 = svm->vcpu.arch.cr4; svm 3706 arch/x86/kvm/svm.c hsave->save.rflags = kvm_get_rflags(&svm->vcpu); svm 3707 arch/x86/kvm/svm.c hsave->save.rip = kvm_rip_read(&svm->vcpu); svm 3713 arch/x86/kvm/svm.c hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); svm 3717 arch/x86/kvm/svm.c enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map); svm 3719 arch/x86/kvm/svm.c if (!nested_svm_vmrun_msrpm(svm)) { svm 3720 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_ERR; svm 3721 arch/x86/kvm/svm.c svm->vmcb->control.exit_code_hi = 0; svm 3722 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1 = 0; svm 3723 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2 = 0; svm 3725 arch/x86/kvm/svm.c nested_svm_vmexit(svm); svm 3747 arch/x86/kvm/svm.c static int vmload_interception(struct vcpu_svm *svm) svm 3753 arch/x86/kvm/svm.c if (nested_svm_check_permissions(svm)) svm 3756 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); svm 3759 arch/x86/kvm/svm.c kvm_inject_gp(&svm->vcpu, 0); svm 3765 arch/x86/kvm/svm.c ret = kvm_skip_emulated_instruction(&svm->vcpu); svm 3767 arch/x86/kvm/svm.c nested_svm_vmloadsave(nested_vmcb, svm->vmcb); svm 3768 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); svm 3773 arch/x86/kvm/svm.c static int vmsave_interception(struct vcpu_svm *svm) svm 3779 arch/x86/kvm/svm.c if (nested_svm_check_permissions(svm)) svm 3782 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); svm 3785 
arch/x86/kvm/svm.c kvm_inject_gp(&svm->vcpu, 0); svm 3791 arch/x86/kvm/svm.c ret = kvm_skip_emulated_instruction(&svm->vcpu); svm 3793 arch/x86/kvm/svm.c nested_svm_vmloadsave(svm->vmcb, nested_vmcb); svm 3794 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); svm 3799 arch/x86/kvm/svm.c static int vmrun_interception(struct vcpu_svm *svm) svm 3801 arch/x86/kvm/svm.c if (nested_svm_check_permissions(svm)) svm 3804 arch/x86/kvm/svm.c return nested_svm_vmrun(svm); svm 3807 arch/x86/kvm/svm.c static int stgi_interception(struct vcpu_svm *svm) svm 3811 arch/x86/kvm/svm.c if (nested_svm_check_permissions(svm)) svm 3818 arch/x86/kvm/svm.c if (vgif_enabled(svm)) svm 3819 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_STGI); svm 3821 arch/x86/kvm/svm.c ret = kvm_skip_emulated_instruction(&svm->vcpu); svm 3822 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); svm 3824 arch/x86/kvm/svm.c enable_gif(svm); svm 3829 arch/x86/kvm/svm.c static int clgi_interception(struct vcpu_svm *svm) svm 3833 arch/x86/kvm/svm.c if (nested_svm_check_permissions(svm)) svm 3836 arch/x86/kvm/svm.c ret = kvm_skip_emulated_instruction(&svm->vcpu); svm 3838 arch/x86/kvm/svm.c disable_gif(svm); svm 3841 arch/x86/kvm/svm.c if (!kvm_vcpu_apicv_active(&svm->vcpu)) { svm 3842 arch/x86/kvm/svm.c svm_clear_vintr(svm); svm 3843 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; svm 3844 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTR); svm 3850 arch/x86/kvm/svm.c static int invlpga_interception(struct vcpu_svm *svm) svm 3852 arch/x86/kvm/svm.c struct kvm_vcpu *vcpu = &svm->vcpu; svm 3854 arch/x86/kvm/svm.c trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), svm 3855 arch/x86/kvm/svm.c kvm_rax_read(&svm->vcpu)); svm 3858 arch/x86/kvm/svm.c kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); svm 3860 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&svm->vcpu); svm 3863 arch/x86/kvm/svm.c static int skinit_interception(struct vcpu_svm *svm) svm 3865 arch/x86/kvm/svm.c trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); svm 3867 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, UD_VECTOR); svm 3871 arch/x86/kvm/svm.c static int wbinvd_interception(struct vcpu_svm *svm) svm 3873 arch/x86/kvm/svm.c return kvm_emulate_wbinvd(&svm->vcpu); svm 3876 arch/x86/kvm/svm.c static int xsetbv_interception(struct vcpu_svm *svm) svm 3878 arch/x86/kvm/svm.c u64 new_bv = kvm_read_edx_eax(&svm->vcpu); svm 3879 arch/x86/kvm/svm.c u32 index = kvm_rcx_read(&svm->vcpu); svm 3881 arch/x86/kvm/svm.c if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { svm 3882 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&svm->vcpu); svm 3888 arch/x86/kvm/svm.c static int rdpru_interception(struct vcpu_svm *svm) svm 3890 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, UD_VECTOR); svm 3894 arch/x86/kvm/svm.c static int task_switch_interception(struct vcpu_svm *svm) svm 3898 arch/x86/kvm/svm.c int int_type = svm->vmcb->control.exit_int_info & svm 3900 arch/x86/kvm/svm.c int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; svm 3902 arch/x86/kvm/svm.c svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; svm 3904 arch/x86/kvm/svm.c svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; svm 3908 arch/x86/kvm/svm.c tss_selector = (u16)svm->vmcb->control.exit_info_1; svm 3910 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_info_2 & svm 3913 arch/x86/kvm/svm.c else if (svm->vmcb->control.exit_info_2 & svm 3924 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false; svm 3927 
arch/x86/kvm/svm.c if (svm->vmcb->control.exit_info_2 & svm 3931 arch/x86/kvm/svm.c (u32)svm->vmcb->control.exit_info_2; svm 3933 arch/x86/kvm/svm.c kvm_clear_exception_queue(&svm->vcpu); svm 3936 arch/x86/kvm/svm.c kvm_clear_interrupt_queue(&svm->vcpu); svm 3947 arch/x86/kvm/svm.c if (!skip_emulated_instruction(&svm->vcpu)) svm 3954 arch/x86/kvm/svm.c return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, svm 3958 arch/x86/kvm/svm.c static int cpuid_interception(struct vcpu_svm *svm) svm 3960 arch/x86/kvm/svm.c return kvm_emulate_cpuid(&svm->vcpu); svm 3963 arch/x86/kvm/svm.c static int iret_interception(struct vcpu_svm *svm) svm 3965 arch/x86/kvm/svm.c ++svm->vcpu.stat.nmi_window_exits; svm 3966 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_IRET); svm 3967 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_IRET_MASK; svm 3968 arch/x86/kvm/svm.c svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); svm 3969 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); svm 3973 arch/x86/kvm/svm.c static int invlpg_interception(struct vcpu_svm *svm) svm 3976 arch/x86/kvm/svm.c return kvm_emulate_instruction(&svm->vcpu, 0); svm 3978 arch/x86/kvm/svm.c kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); svm 3979 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&svm->vcpu); svm 3982 arch/x86/kvm/svm.c static int emulate_on_interception(struct vcpu_svm *svm) svm 3984 arch/x86/kvm/svm.c return kvm_emulate_instruction(&svm->vcpu, 0); svm 3987 arch/x86/kvm/svm.c static int rsm_interception(struct vcpu_svm *svm) svm 3989 arch/x86/kvm/svm.c return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); svm 3992 arch/x86/kvm/svm.c static int rdpmc_interception(struct vcpu_svm *svm) svm 3997 arch/x86/kvm/svm.c return emulate_on_interception(svm); svm 3999 arch/x86/kvm/svm.c err = kvm_rdpmc(&svm->vcpu); svm 4000 arch/x86/kvm/svm.c return kvm_complete_insn_gp(&svm->vcpu, err); svm 4003 arch/x86/kvm/svm.c static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, svm 4006 arch/x86/kvm/svm.c unsigned long cr0 = svm->vcpu.arch.cr0; svm 4010 arch/x86/kvm/svm.c intercept = svm->nested.intercept; svm 4012 arch/x86/kvm/svm.c if (!is_guest_mode(&svm->vcpu) || svm 4020 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; svm 4021 arch/x86/kvm/svm.c ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); svm 4029 arch/x86/kvm/svm.c static int cr_interception(struct vcpu_svm *svm) svm 4036 arch/x86/kvm/svm.c return emulate_on_interception(svm); svm 4038 arch/x86/kvm/svm.c if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) svm 4039 arch/x86/kvm/svm.c return emulate_on_interception(svm); svm 4041 arch/x86/kvm/svm.c reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; svm 4042 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) svm 4045 arch/x86/kvm/svm.c cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; svm 4050 arch/x86/kvm/svm.c val = kvm_register_read(&svm->vcpu, reg); svm 4053 arch/x86/kvm/svm.c if (!check_selective_cr0_intercepted(svm, val)) svm 4054 arch/x86/kvm/svm.c err = kvm_set_cr0(&svm->vcpu, val); svm 4060 arch/x86/kvm/svm.c err = kvm_set_cr3(&svm->vcpu, val); svm 4063 arch/x86/kvm/svm.c err = kvm_set_cr4(&svm->vcpu, val); svm 4066 arch/x86/kvm/svm.c err = kvm_set_cr8(&svm->vcpu, val); svm 4070 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, UD_VECTOR); svm 4076 arch/x86/kvm/svm.c val = kvm_read_cr0(&svm->vcpu); svm 4079 arch/x86/kvm/svm.c val = svm->vcpu.arch.cr2; svm 4082 
arch/x86/kvm/svm.c val = kvm_read_cr3(&svm->vcpu); svm 4085 arch/x86/kvm/svm.c val = kvm_read_cr4(&svm->vcpu); svm 4088 arch/x86/kvm/svm.c val = kvm_get_cr8(&svm->vcpu); svm 4092 arch/x86/kvm/svm.c kvm_queue_exception(&svm->vcpu, UD_VECTOR); svm 4095 arch/x86/kvm/svm.c kvm_register_write(&svm->vcpu, reg, val); svm 4097 arch/x86/kvm/svm.c return kvm_complete_insn_gp(&svm->vcpu, err); svm 4100 arch/x86/kvm/svm.c static int dr_interception(struct vcpu_svm *svm) svm 4105 arch/x86/kvm/svm.c if (svm->vcpu.guest_debug == 0) { svm 4111 arch/x86/kvm/svm.c clr_dr_intercepts(svm); svm 4112 arch/x86/kvm/svm.c svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; svm 4117 arch/x86/kvm/svm.c return emulate_on_interception(svm); svm 4119 arch/x86/kvm/svm.c reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; svm 4120 arch/x86/kvm/svm.c dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; svm 4123 arch/x86/kvm/svm.c if (!kvm_require_dr(&svm->vcpu, dr - 16)) svm 4125 arch/x86/kvm/svm.c val = kvm_register_read(&svm->vcpu, reg); svm 4126 arch/x86/kvm/svm.c kvm_set_dr(&svm->vcpu, dr - 16, val); svm 4128 arch/x86/kvm/svm.c if (!kvm_require_dr(&svm->vcpu, dr)) svm 4130 arch/x86/kvm/svm.c kvm_get_dr(&svm->vcpu, dr, &val); svm 4131 arch/x86/kvm/svm.c kvm_register_write(&svm->vcpu, reg, val); svm 4134 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&svm->vcpu); svm 4137 arch/x86/kvm/svm.c static int cr8_write_interception(struct vcpu_svm *svm) svm 4139 arch/x86/kvm/svm.c struct kvm_run *kvm_run = svm->vcpu.run; svm 4142 arch/x86/kvm/svm.c u8 cr8_prev = kvm_get_cr8(&svm->vcpu); svm 4144 arch/x86/kvm/svm.c r = cr_interception(svm); svm 4145 arch/x86/kvm/svm.c if (lapic_in_kernel(&svm->vcpu)) svm 4147 arch/x86/kvm/svm.c if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) svm 4171 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 4175 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.star; svm 4179 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.lstar; svm 4182 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.cstar; svm 4185 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.kernel_gs_base; svm 4188 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.sfmask; svm 4192 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.sysenter_cs; svm 4195 arch/x86/kvm/svm.c msr_info->data = svm->sysenter_eip; svm 4198 arch/x86/kvm/svm.c msr_info->data = svm->sysenter_esp; svm 4203 arch/x86/kvm/svm.c msr_info->data = svm->tsc_aux; svm 4211 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.dbgctl; svm 4214 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.br_from; svm 4217 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.br_to; svm 4220 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.last_excp_from; svm 4223 arch/x86/kvm/svm.c msr_info->data = svm->vmcb->save.last_excp_to; svm 4226 arch/x86/kvm/svm.c msr_info->data = svm->nested.hsave_msr; svm 4229 arch/x86/kvm/svm.c msr_info->data = svm->nested.vm_cr_msr; svm 4237 arch/x86/kvm/svm.c msr_info->data = svm->spec_ctrl; svm 4244 arch/x86/kvm/svm.c msr_info->data = svm->virt_spec_ctrl; svm 4264 arch/x86/kvm/svm.c msr_info->data = svm->msr_decfg; svm 4272 arch/x86/kvm/svm.c static int rdmsr_interception(struct vcpu_svm *svm) svm 4274 arch/x86/kvm/svm.c return kvm_emulate_rdmsr(&svm->vcpu); svm 4279 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 4287 arch/x86/kvm/svm.c if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) svm 4290 arch/x86/kvm/svm.c svm->nested.vm_cr_msr &= ~chg_mask; svm 4291 arch/x86/kvm/svm.c svm->nested.vm_cr_msr |= 
(data & chg_mask); svm 4293 arch/x86/kvm/svm.c svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; svm 4304 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu); svm 4313 arch/x86/kvm/svm.c svm->vmcb->save.g_pat = data; svm 4314 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_NPT); svm 4326 arch/x86/kvm/svm.c svm->spec_ctrl = data; svm 4342 arch/x86/kvm/svm.c set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); svm 4358 arch/x86/kvm/svm.c set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); svm 4368 arch/x86/kvm/svm.c svm->virt_spec_ctrl = data; svm 4371 arch/x86/kvm/svm.c svm->vmcb->save.star = data; svm 4375 arch/x86/kvm/svm.c svm->vmcb->save.lstar = data; svm 4378 arch/x86/kvm/svm.c svm->vmcb->save.cstar = data; svm 4381 arch/x86/kvm/svm.c svm->vmcb->save.kernel_gs_base = data; svm 4384 arch/x86/kvm/svm.c svm->vmcb->save.sfmask = data; svm 4388 arch/x86/kvm/svm.c svm->vmcb->save.sysenter_cs = data; svm 4391 arch/x86/kvm/svm.c svm->sysenter_eip = data; svm 4392 arch/x86/kvm/svm.c svm->vmcb->save.sysenter_eip = data; svm 4395 arch/x86/kvm/svm.c svm->sysenter_esp = data; svm 4396 arch/x86/kvm/svm.c svm->vmcb->save.sysenter_esp = data; svm 4407 arch/x86/kvm/svm.c svm->tsc_aux = data; svm 4408 arch/x86/kvm/svm.c wrmsrl(MSR_TSC_AUX, svm->tsc_aux); svm 4419 arch/x86/kvm/svm.c svm->vmcb->save.dbgctl = data; svm 4420 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_LBR); svm 4422 arch/x86/kvm/svm.c svm_enable_lbrv(svm); svm 4424 arch/x86/kvm/svm.c svm_disable_lbrv(svm); svm 4427 arch/x86/kvm/svm.c svm->nested.hsave_msr = data; svm 4449 arch/x86/kvm/svm.c svm->msr_decfg = data; svm 4462 arch/x86/kvm/svm.c static int wrmsr_interception(struct vcpu_svm *svm) svm 4464 arch/x86/kvm/svm.c return kvm_emulate_wrmsr(&svm->vcpu); svm 4467 arch/x86/kvm/svm.c static int msr_interception(struct vcpu_svm *svm) svm 4469 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_info_1) svm 4470 arch/x86/kvm/svm.c return wrmsr_interception(svm); svm 4472 arch/x86/kvm/svm.c return rdmsr_interception(svm); svm 4475 arch/x86/kvm/svm.c static int interrupt_window_interception(struct vcpu_svm *svm) svm 4477 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); svm 4478 arch/x86/kvm/svm.c svm_clear_vintr(svm); svm 4479 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; svm 4480 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTR); svm 4481 arch/x86/kvm/svm.c ++svm->vcpu.stat.irq_window_exits; svm 4485 arch/x86/kvm/svm.c static int pause_interception(struct vcpu_svm *svm) svm 4487 arch/x86/kvm/svm.c struct kvm_vcpu *vcpu = &svm->vcpu; svm 4497 arch/x86/kvm/svm.c static int nop_interception(struct vcpu_svm *svm) svm 4499 arch/x86/kvm/svm.c return kvm_skip_emulated_instruction(&(svm->vcpu)); svm 4502 arch/x86/kvm/svm.c static int monitor_interception(struct vcpu_svm *svm) svm 4505 arch/x86/kvm/svm.c return nop_interception(svm); svm 4508 arch/x86/kvm/svm.c static int mwait_interception(struct vcpu_svm *svm) svm 4511 arch/x86/kvm/svm.c return nop_interception(svm); svm 4521 arch/x86/kvm/svm.c static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) svm 4523 arch/x86/kvm/svm.c u32 icrh = svm->vmcb->control.exit_info_1 >> 32; svm 4524 arch/x86/kvm/svm.c u32 icrl = svm->vmcb->control.exit_info_1; svm 4525 arch/x86/kvm/svm.c u32 id = svm->vmcb->control.exit_info_2 >> 32; svm 4526 arch/x86/kvm/svm.c u32 index = svm->vmcb->control.exit_info_2 & 0xFF; svm 4527 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic; svm 4529 arch/x86/kvm/svm.c 
svm 4550 arch/x86/kvm/svm.c struct kvm *kvm = svm->vcpu.kvm;
svm 4551 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic;
svm 4571 arch/x86/kvm/svm.c index, svm->vcpu.vcpu_id, icrh, icrl);
svm 4633 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4634 arch/x86/kvm/svm.c bool flat = svm->dfr_reg == APIC_DFR_FLAT;
svm 4635 arch/x86/kvm/svm.c u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
svm 4644 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4648 arch/x86/kvm/svm.c if (ldr == svm->ldr_reg)
svm 4657 arch/x86/kvm/svm.c svm->ldr_reg = ldr;
svm 4665 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4685 arch/x86/kvm/svm.c if (svm->ldr_reg)
svm 4693 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4696 arch/x86/kvm/svm.c if (svm->dfr_reg == dfr)
svm 4700 arch/x86/kvm/svm.c svm->dfr_reg = dfr;
svm 4703 arch/x86/kvm/svm.c static int avic_unaccel_trap_write(struct vcpu_svm *svm)
svm 4705 arch/x86/kvm/svm.c struct kvm_lapic *apic = svm->vcpu.arch.apic;
svm 4706 arch/x86/kvm/svm.c u32 offset = svm->vmcb->control.exit_info_1 &
svm 4711 arch/x86/kvm/svm.c if (avic_handle_apic_id_update(&svm->vcpu))
svm 4715 arch/x86/kvm/svm.c if (avic_handle_ldr_update(&svm->vcpu))
svm 4719 arch/x86/kvm/svm.c avic_handle_dfr_update(&svm->vcpu);
svm 4759 arch/x86/kvm/svm.c static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
svm 4762 arch/x86/kvm/svm.c u32 offset = svm->vmcb->control.exit_info_1 &
svm 4764 arch/x86/kvm/svm.c u32 vector = svm->vmcb->control.exit_info_2 &
svm 4766 arch/x86/kvm/svm.c bool write = (svm->vmcb->control.exit_info_1 >> 32) &
svm 4770 arch/x86/kvm/svm.c trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
svm 4775 arch/x86/kvm/svm.c ret = avic_unaccel_trap_write(svm);
svm 4778 arch/x86/kvm/svm.c ret = kvm_emulate_instruction(&svm->vcpu, 0);
svm 4784 arch/x86/kvm/svm.c static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
svm 4854 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4855 arch/x86/kvm/svm.c struct vmcb_control_area *control = &svm->vmcb->control;
svm 4856 arch/x86/kvm/svm.c struct vmcb_save_area *save = &svm->vmcb->save;
svm 4978 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 4980 arch/x86/kvm/svm.c u32 exit_code = svm->vmcb->control.exit_code;
svm 4984 arch/x86/kvm/svm.c if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
svm 4985 arch/x86/kvm/svm.c vcpu->arch.cr0 = svm->vmcb->save.cr0;
svm 4987 arch/x86/kvm/svm.c vcpu->arch.cr3 = svm->vmcb->save.cr3;
svm 4989 arch/x86/kvm/svm.c if (unlikely(svm->nested.exit_required)) {
svm 4990 arch/x86/kvm/svm.c nested_svm_vmexit(svm);
svm 4991 arch/x86/kvm/svm.c svm->nested.exit_required = false;
svm 4999 arch/x86/kvm/svm.c trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
svm 5000 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_1,
svm 5001 arch/x86/kvm/svm.c svm->vmcb->control.exit_info_2,
svm 5002 arch/x86/kvm/svm.c svm->vmcb->control.exit_int_info,
svm 5003 arch/x86/kvm/svm.c svm->vmcb->control.exit_int_info_err,
svm 5006 arch/x86/kvm/svm.c vmexit = nested_svm_exit_special(svm);
svm 5009 arch/x86/kvm/svm.c vmexit = nested_svm_exit_handled(svm);
svm 5015 arch/x86/kvm/svm.c svm_complete_interrupts(svm);
svm 5017 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
svm 5020 arch/x86/kvm/svm.c = svm->vmcb->control.exit_code;
svm 5025 arch/x86/kvm/svm.c if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
svm 5031 arch/x86/kvm/svm.c __func__, svm->vmcb->control.exit_int_info,
svm 5046 arch/x86/kvm/svm.c return svm_exit_handlers[exit_code](svm);
svm 5058 arch/x86/kvm/svm.c static void pre_sev_run(struct vcpu_svm *svm, int cpu)
svm 5061 arch/x86/kvm/svm.c int asid = sev_get_asid(svm->vcpu.kvm);
svm 5064 arch/x86/kvm/svm.c svm->vmcb->control.asid = asid;
svm 5072 arch/x86/kvm/svm.c if (sd->sev_vmcbs[asid] == svm->vmcb &&
svm 5073 arch/x86/kvm/svm.c svm->last_cpu == cpu)
svm 5076 arch/x86/kvm/svm.c svm->last_cpu = cpu;
svm 5077 arch/x86/kvm/svm.c sd->sev_vmcbs[asid] = svm->vmcb;
svm 5078 arch/x86/kvm/svm.c svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
svm 5079 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_ASID);
svm 5082 arch/x86/kvm/svm.c static void pre_svm_run(struct vcpu_svm *svm)
svm 5088 arch/x86/kvm/svm.c if (sev_guest(svm->vcpu.kvm))
svm 5089 arch/x86/kvm/svm.c return pre_sev_run(svm, cpu);
svm 5092 arch/x86/kvm/svm.c if (svm->asid_generation != sd->asid_generation)
svm 5093 arch/x86/kvm/svm.c new_asid(svm, sd);
svm 5098 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5100 arch/x86/kvm/svm.c svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
svm 5102 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_IRET);
svm 5106 arch/x86/kvm/svm.c static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
svm 5111 arch/x86/kvm/svm.c control = &svm->vmcb->control;
svm 5116 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_INTR);
svm 5121 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5123 arch/x86/kvm/svm.c BUG_ON(!(gif_set(svm)));
svm 5128 arch/x86/kvm/svm.c svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
svm 5139 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5145 arch/x86/kvm/svm.c clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
svm 5151 arch/x86/kvm/svm.c set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
svm 5175 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5176 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb;
svm 5215 arch/x86/kvm/svm.c static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
svm 5220 arch/x86/kvm/svm.c spin_lock_irqsave(&svm->ir_list_lock, flags);
svm 5221 arch/x86/kvm/svm.c list_for_each_entry(cur, &svm->ir_list, node) {
svm 5228 arch/x86/kvm/svm.c spin_unlock_irqrestore(&svm->ir_list_lock, flags);
svm 5231 arch/x86/kvm/svm.c static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
svm 5243 arch/x86/kvm/svm.c struct kvm *kvm = svm->vcpu.kvm;
svm 5268 arch/x86/kvm/svm.c spin_lock_irqsave(&svm->ir_list_lock, flags);
svm 5269 arch/x86/kvm/svm.c list_add(&ir->node, &svm->ir_list);
svm 5270 arch/x86/kvm/svm.c spin_unlock_irqrestore(&svm->ir_list_lock, flags);
svm 5288 arch/x86/kvm/svm.c struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
svm 5304 arch/x86/kvm/svm.c *svm = to_svm(vcpu);
svm 5305 arch/x86/kvm/svm.c vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
svm 5340 arch/x86/kvm/svm.c struct vcpu_svm *svm = NULL;
svm 5352 arch/x86/kvm/svm.c if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
svm 5353 arch/x86/kvm/svm.c kvm_vcpu_apicv_active(&svm->vcpu)) {
svm 5357 arch/x86/kvm/svm.c pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
svm 5360 arch/x86/kvm/svm.c svm->vcpu.vcpu_id);
svm 5373 arch/x86/kvm/svm.c svm_ir_list_add(svm, &pi);
svm 5402 arch/x86/kvm/svm.c if (!ret && svm) {
svm 5403 arch/x86/kvm/svm.c trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
svm 5422 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
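Lines 4784 and 5046 above are the two halves of exit handling: svm_exit_handlers[] is a table of per-exit-code handler functions, and handle_exit() indexes it with the VMCB exit code once the nested-virtualization cases have been dealt with. The self-contained sketch below shows that table-dispatch shape with made-up exit codes and handlers; the bounds/NULL check mirrors what the real code performs before the indexed call.

#include <stddef.h>
#include <stdio.h>

struct vcpu;    /* opaque stand-in for struct vcpu_svm */

/* A couple of stand-in handlers. */
static int handle_cpuid(struct vcpu *v) { (void)v; puts("CPUID exit"); return 1; }
static int handle_hlt(struct vcpu *v)   { (void)v; puts("HLT exit");   return 1; }

/* Hypothetical exit codes for the sketch. */
enum { EXIT_CPUID = 0, EXIT_HLT = 1, NR_EXITS };

/* Table of handlers indexed by exit code, as in svm_exit_handlers[]. */
static int (*const exit_handlers[NR_EXITS])(struct vcpu *) = {
    [EXIT_CPUID] = handle_cpuid,
    [EXIT_HLT]   = handle_hlt,
};

static int handle_exit(struct vcpu *v, unsigned int exit_code)
{
    /* Reject codes outside the table or without a registered handler. */
    if (exit_code >= NR_EXITS || !exit_handlers[exit_code])
        return -1;
    return exit_handlers[exit_code](v);
}

int main(void)
{
    handle_exit(NULL, EXIT_HLT);
    return 0;
}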
svm 5423 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb;
svm 5426 arch/x86/kvm/svm.c !(svm->vcpu.arch.hflags & HF_NMI_MASK);
svm 5427 arch/x86/kvm/svm.c ret = ret && gif_set(svm) && nested_svm_nmi(svm);
svm 5434 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5436 arch/x86/kvm/svm.c return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
svm 5441 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5444 arch/x86/kvm/svm.c svm->vcpu.arch.hflags |= HF_NMI_MASK;
svm 5445 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_IRET);
svm 5447 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
svm 5448 arch/x86/kvm/svm.c clr_intercept(svm, INTERCEPT_IRET);
svm 5454 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5455 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb;
svm 5458 arch/x86/kvm/svm.c if (!gif_set(svm) ||
svm 5465 arch/x86/kvm/svm.c return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
svm 5472 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5485 arch/x86/kvm/svm.c if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
svm 5486 arch/x86/kvm/svm.c svm_set_vintr(svm);
svm 5487 arch/x86/kvm/svm.c svm_inject_irq(svm, 0x0);
svm 5493 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5495 arch/x86/kvm/svm.c if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
svm 5499 arch/x86/kvm/svm.c if (!gif_set(svm)) {
svm 5500 arch/x86/kvm/svm.c if (vgif_enabled(svm))
svm 5501 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_STGI);
svm 5505 arch/x86/kvm/svm.c if (svm->nested.exit_required)
svm 5512 arch/x86/kvm/svm.c svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
svm 5513 arch/x86/kvm/svm.c svm->nmi_singlestep = true;
svm 5514 arch/x86/kvm/svm.c svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
svm 5529 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5532 arch/x86/kvm/svm.c svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
svm 5534 arch/x86/kvm/svm.c svm->asid_generation--;
svm 5539 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5541 arch/x86/kvm/svm.c invlpga(gva, svm->vmcb->control.asid);
svm 5550 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5555 arch/x86/kvm/svm.c if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
svm 5556 arch/x86/kvm/svm.c int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
svm 5563 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5571 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm 5572 arch/x86/kvm/svm.c svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
svm 5575 arch/x86/kvm/svm.c static void svm_complete_interrupts(struct vcpu_svm *svm)
svm 5579 arch/x86/kvm/svm.c u32 exitintinfo = svm->vmcb->control.exit_int_info;
svm 5580 arch/x86/kvm/svm.c unsigned int3_injected = svm->int3_injected;
svm 5582 arch/x86/kvm/svm.c svm->int3_injected = 0;
svm 5588 arch/x86/kvm/svm.c if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
svm 5589 arch/x86/kvm/svm.c && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
svm 5590 arch/x86/kvm/svm.c svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
svm 5591 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm 5594 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = false;
svm 5595 arch/x86/kvm/svm.c kvm_clear_exception_queue(&svm->vcpu);
svm 5596 arch/x86/kvm/svm.c kvm_clear_interrupt_queue(&svm->vcpu);
svm 5601 arch/x86/kvm/svm.c kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm 5608 arch/x86/kvm/svm.c svm->vcpu.arch.nmi_injected = true;
svm 5618 arch/x86/kvm/svm.c kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
svm 5619 arch/x86/kvm/svm.c kvm_rip_write(&svm->vcpu,
svm 5620 arch/x86/kvm/svm.c kvm_rip_read(&svm->vcpu) -
svm 5625 arch/x86/kvm/svm.c u32 err = svm->vmcb->control.exit_int_info_err;
svm 5626 arch/x86/kvm/svm.c kvm_requeue_exception_e(&svm->vcpu, vector, err);
svm 5629 arch/x86/kvm/svm.c kvm_requeue_exception(&svm->vcpu, vector);
svm 5632 arch/x86/kvm/svm.c kvm_queue_interrupt(&svm->vcpu, vector, false);
svm 5641 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5642 arch/x86/kvm/svm.c struct vmcb_control_area *control = &svm->vmcb->control;
svm 5647 arch/x86/kvm/svm.c svm_complete_interrupts(svm);
svm 5652 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5654 arch/x86/kvm/svm.c svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm 5655 arch/x86/kvm/svm.c svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm 5656 arch/x86/kvm/svm.c svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
svm 5662 arch/x86/kvm/svm.c if (unlikely(svm->nested.exit_required))
svm 5671 arch/x86/kvm/svm.c if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
svm 5677 arch/x86/kvm/svm.c disable_nmi_singlestep(svm);
svm 5681 arch/x86/kvm/svm.c pre_svm_run(svm);
svm 5685 arch/x86/kvm/svm.c svm->vmcb->save.cr2 = vcpu->arch.cr2;
svm 5700 arch/x86/kvm/svm.c x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
svm 5767 arch/x86/kvm/svm.c : [svm]"a"(svm),
svm 5798 arch/x86/kvm/svm.c wrmsrl(MSR_GS_BASE, svm->host.gs_base);
svm 5800 arch/x86/kvm/svm.c loadsegment(fs, svm->host.fs);
svm 5802 arch/x86/kvm/svm.c loadsegment(gs, svm->host.gs);
svm 5822 arch/x86/kvm/svm.c svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
svm 5828 arch/x86/kvm/svm.c x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
svm 5830 arch/x86/kvm/svm.c vcpu->arch.cr2 = svm->vmcb->save.cr2;
svm 5831 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
svm 5832 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
svm 5833 arch/x86/kvm/svm.c vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
svm 5835 arch/x86/kvm/svm.c if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
svm 5836 arch/x86/kvm/svm.c kvm_before_interrupt(&svm->vcpu);
svm 5843 arch/x86/kvm/svm.c if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
svm 5844 arch/x86/kvm/svm.c kvm_after_interrupt(&svm->vcpu);
svm 5848 arch/x86/kvm/svm.c svm->next_rip = 0;
svm 5850 arch/x86/kvm/svm.c svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
svm 5853 arch/x86/kvm/svm.c if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
svm 5854 arch/x86/kvm/svm.c svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
svm 5865 arch/x86/kvm/svm.c if (unlikely(svm->vmcb->control.exit_code ==
svm 5867 arch/x86/kvm/svm.c svm_handle_mce(svm);
svm 5869 arch/x86/kvm/svm.c mark_all_clean(svm->vmcb);
svm 5875 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5877 arch/x86/kvm/svm.c svm->vmcb->save.cr3 = __sme_set(root);
svm 5878 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_CR);
svm 5883 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5885 arch/x86/kvm/svm.c svm->vmcb->control.nested_cr3 = __sme_set(root);
svm 5886 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_NPT);
svm 5889 arch/x86/kvm/svm.c svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
svm 5890 arch/x86/kvm/svm.c mark_dirty(svm->vmcb, VMCB_CR);
svm 5945 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 5948 arch/x86/kvm/svm.c svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
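The repeated mark_dirty(svm->vmcb, VMCB_CR/VMCB_NPT/VMCB_INTR, ...) calls and the mark_all_clean() at line 5869 implement the VMCB clean-bits optimisation: a bitmask tells the CPU which groups of VMCB fields are unchanged since the last VMRUN, so software clears a bit whenever it touches the corresponding fields and sets everything clean again after a run. The sketch below shows just that bitmask bookkeeping; the bit indices and the trimmed-down struct vmcb are illustrative assumptions, and the real mark_all_clean() additionally keeps some fields permanently dirty.

#include <stdint.h>
#include <stdio.h>

/* Illustrative clean-bit indices; the real assignments live in
 * arch/x86/kvm/svm.c (VMCB_INTERCEPTS, VMCB_ASID, VMCB_INTR, ...). */
enum vmcb_clean_bit { VMCB_INTR = 0, VMCB_NPT, VMCB_CR, VMCB_DIRTY_MAX };

struct vmcb {
    uint32_t clean;    /* bit set = field group unchanged since last VMRUN */
};

/* Clearing a clean bit tells the CPU it must re-read that field group. */
static void mark_dirty(struct vmcb *vmcb, enum vmcb_clean_bit bit)
{
    vmcb->clean &= ~(1u << bit);
}

/* After a successful VMRUN everything is in sync again. */
static void mark_all_clean(struct vmcb *vmcb)
{
    vmcb->clean = (1u << VMCB_DIRTY_MAX) - 1;
}

int main(void)
{
    struct vmcb vmcb;

    mark_all_clean(&vmcb);
    mark_dirty(&vmcb, VMCB_CR);    /* e.g. after updating save.cr3 */
    printf("clean mask: %#x\n", vmcb.clean);
    return 0;
}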
svm 6113 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 6116 arch/x86/kvm/svm.c struct vmcb *vmcb = svm->vmcb;
svm 6142 arch/x86/kvm/svm.c intercept = svm->nested.intercept;
svm 6221 arch/x86/kvm/svm.c vmexit = nested_svm_exit_handled(svm);
svm 6257 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 6260 arch/x86/kvm/svm.c if (!gif_set(svm))
svm 6263 arch/x86/kvm/svm.c if (is_guest_mode(&svm->vcpu) &&
svm 6264 arch/x86/kvm/svm.c svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
svm 6266 arch/x86/kvm/svm.c svm->vmcb->control.exit_code = SVM_EXIT_SMI;
svm 6267 arch/x86/kvm/svm.c svm->nested.exit_required = true;
svm 6276 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 6283 arch/x86/kvm/svm.c put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
svm 6285 arch/x86/kvm/svm.c svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm 6286 arch/x86/kvm/svm.c svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm 6287 arch/x86/kvm/svm.c svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
svm 6289 arch/x86/kvm/svm.c ret = nested_svm_vmexit(svm);
svm 6298 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 6308 arch/x86/kvm/svm.c if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
svm 6311 arch/x86/kvm/svm.c enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
svm 6318 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 6320 arch/x86/kvm/svm.c if (!gif_set(svm)) {
svm 6321 arch/x86/kvm/svm.c if (vgif_enabled(svm))
svm 6322 arch/x86/kvm/svm.c set_intercept(svm, INTERCEPT_STGI);
svm 7224 arch/x86/kvm/svm.c struct vcpu_svm *svm = to_svm(vcpu);
svm 7233 arch/x86/kvm/svm.c return !gif_set(svm) ||
svm 7234 arch/x86/kvm/svm.c (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
svm 201 drivers/gpu/drm/nouveau/nouveau_bo.c struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
svm 138 drivers/gpu/drm/nouveau/nouveau_chan.c chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
svm 179 drivers/gpu/drm/nouveau/nouveau_drm.c nouveau_vmm_fini(&cli->svm);
svm 100 drivers/gpu/drm/nouveau/nouveau_drv.h struct nouveau_vmm svm;
svm 213 drivers/gpu/drm/nouveau/nouveau_drv.h struct nouveau_svm *svm;
svm 67 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
svm 142 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : & cli->vmm;
svm 226 drivers/gpu/drm/nouveau/nouveau_gem.c struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
svm 80 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
svm 83 drivers/gpu/drm/nouveau/nouveau_svm.c list_for_each_entry(ivmm, &svm->inst, head) {
svm 176 drivers/gpu/drm/nouveau/nouveau_svm.c if (!cli->svm.svmm) {
svm 215 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
svm 216 drivers/gpu/drm/nouveau/nouveau_svm.c ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
svm 221 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
svm 236 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
svm 237 drivers/gpu/drm/nouveau/nouveau_svm.c list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
svm 238 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
svm 320 drivers/gpu/drm/nouveau/nouveau_svm.c svmm->vmm = &cli->svm;
svm 327 drivers/gpu/drm/nouveau/nouveau_svm.c if (cli->svm.cli) {
svm 342 drivers/gpu/drm/nouveau/nouveau_svm.c }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
svm 352 drivers/gpu/drm/nouveau/nouveau_svm.c cli->svm.svmm = svmm;
svm 353 drivers/gpu/drm/nouveau/nouveau_svm.c cli->svm.cli = cli;
svm 381 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_replay(struct nouveau_svm *svm)
svm 383 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "replay");
svm 384 drivers/gpu/drm/nouveau/nouveau_svm.c WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
svm 396 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel(struct nouveau_svm *svm,
svm 399 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
svm 400 drivers/gpu/drm/nouveau/nouveau_svm.c WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
svm 411 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
svm 414 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel(svm, fault->inst,
svm 436 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cache(struct nouveau_svm *svm,
svm 463 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
svm 480 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "fault %016llx %016llx %02x",
svm 528 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm *svm =
svm 529 drivers/gpu/drm/nouveau/nouveau_svm.c container_of(buffer, typeof(*svm), buffer[buffer->id]);
svm 530 drivers/gpu/drm/nouveau/nouveau_svm.c struct nvif_object *device = &svm->drm->client.device.object;
svm 549 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "fault handler");
svm 558 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
svm 560 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
svm 565 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
svm 575 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_lock(&svm->mutex);
svm 579 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
svm 582 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
svm 586 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_unlock(&svm->mutex);
svm 598 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
svm 623 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
svm 633 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
svm 704 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_dmem_convert_pfn(svm->drm, &range);
svm 728 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_cancel_fault(svm, fault);
svm 737 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_replay(svm);
svm 742 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
svm 744 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
svm 749 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
svm 751 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
svm 752 drivers/gpu/drm/nouveau/nouveau_svm.c struct nvif_object *device = &svm->drm->client.device.object;
svm 755 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
svm 760 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
svm 762 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
svm 771 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_fini(svm, id);
svm 778 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
svm 780 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
svm 781 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_drm *drm = svm->drm;
svm 791 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
svm 810 drivers/gpu/drm/nouveau/nouveau_svm.c return nouveau_svm_fault_buffer_init(svm, id);
svm 816 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm *svm = drm->svm;
svm 817 drivers/gpu/drm/nouveau/nouveau_svm.c if (svm)
svm 818 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_init(svm, 0);
svm 824 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm *svm = drm->svm;
svm 825 drivers/gpu/drm/nouveau/nouveau_svm.c if (svm)
svm 826 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_fini(svm, 0);
svm 832 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm *svm = drm->svm;
svm 833 drivers/gpu/drm/nouveau/nouveau_svm.c if (svm) {
svm 834 drivers/gpu/drm/nouveau/nouveau_svm.c nouveau_svm_fault_buffer_dtor(svm, 0);
svm 835 drivers/gpu/drm/nouveau/nouveau_svm.c kfree(drm->svm);
svm 836 drivers/gpu/drm/nouveau/nouveau_svm.c drm->svm = NULL;
svm 848 drivers/gpu/drm/nouveau/nouveau_svm.c struct nouveau_svm *svm;
svm 858 drivers/gpu/drm/nouveau/nouveau_svm.c if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
svm 861 drivers/gpu/drm/nouveau/nouveau_svm.c drm->svm->drm = drm;
svm 862 drivers/gpu/drm/nouveau/nouveau_svm.c mutex_init(&drm->svm->mutex);
svm 863 drivers/gpu/drm/nouveau/nouveau_svm.c INIT_LIST_HEAD(&drm->svm->inst);
svm 867 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "No supported fault buffer class");
svm 872 drivers/gpu/drm/nouveau/nouveau_svm.c ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
svm 878 drivers/gpu/drm/nouveau/nouveau_svm.c SVM_DBG(svm, "Initialised");
svm 102 drivers/iommu/intel-svm.c static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
svm 108 drivers/iommu/intel-svm.c desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
svm 116 drivers/iommu/intel-svm.c desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
svm 126 drivers/iommu/intel-svm.c qi_submit_sync(&desc, svm->iommu);
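The nouveau_svm fault path above is organised around a replayable fault buffer: the GPU appends fault records and advances a PUT pointer, and the handler walks from GET to PUT, looks up the instance (ivmm) each record belongs to, services or cancels it, and finally replays. The sketch below shows only that GET/PUT consumer shape over an in-memory ring; the record layout and the lookup/process/cancel hooks are stand-ins, not the driver's real structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8

struct fault_rec { uint64_t inst; uint64_t addr; };   /* stand-in record */

struct fault_ring {
    struct fault_rec rec[RING_ENTRIES];
    uint32_t get;   /* next entry software will consume           */
    uint32_t put;   /* next entry hardware will write (simulated) */
};

static bool lookup_instance(uint64_t inst) { return inst != 0; }   /* stand-in */
static void process_fault(const struct fault_rec *r)
{
    printf("service %#llx\n", (unsigned long long)r->addr);
}
static void cancel_fault(const struct fault_rec *r)
{
    printf("cancel  %#llx\n", (unsigned long long)r->addr);
}

/* Drain everything between GET and PUT, as the fault handler does. */
static void drain_faults(struct fault_ring *ring)
{
    while (ring->get != ring->put) {
        struct fault_rec *r = &ring->rec[ring->get % RING_ENTRIES];

        if (lookup_instance(r->inst))
            process_fault(r);   /* known address space: handle it      */
        else
            cancel_fault(r);    /* unknown instance: cancel the fault  */
        ring->get++;            /* the driver writes this to a register */
    }
}

int main(void)
{
    struct fault_ring ring = { .rec = { { 1, 0x1000 }, { 0, 0x2000 } }, .put = 2 };

    drain_faults(&ring);
    return 0;
}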
svm 129 drivers/iommu/intel-svm.c desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
svm 150 drivers/iommu/intel-svm.c qi_submit_sync(&desc, svm->iommu);
svm 154 drivers/iommu/intel-svm.c static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
svm 160 drivers/iommu/intel-svm.c list_for_each_entry_rcu(sdev, &svm->devs, list)
svm 161 drivers/iommu/intel-svm.c intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
svm 170 drivers/iommu/intel-svm.c struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
svm 172 drivers/iommu/intel-svm.c intel_flush_svm_range(svm, start,
svm 178 drivers/iommu/intel-svm.c struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
svm 194 drivers/iommu/intel-svm.c list_for_each_entry_rcu(sdev, &svm->devs, list) {
svm 195 drivers/iommu/intel-svm.c intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
svm 196 drivers/iommu/intel-svm.c intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
svm 215 drivers/iommu/intel-svm.c struct intel_svm *svm = NULL;
svm 246 drivers/iommu/intel-svm.c svm = t;
svm 247 drivers/iommu/intel-svm.c if (svm->pasid >= pasid_max) {
svm 250 drivers/iommu/intel-svm.c svm->pasid);
svm 255 drivers/iommu/intel-svm.c list_for_each_entry(sdev, &svm->devs, list) {
svm 305 drivers/iommu/intel-svm.c if (!svm) {
svm 306 drivers/iommu/intel-svm.c svm = kzalloc(sizeof(*svm), GFP_KERNEL);
svm 307 drivers/iommu/intel-svm.c if (!svm) {
svm 312 drivers/iommu/intel-svm.c svm->iommu = iommu;
svm 318 drivers/iommu/intel-svm.c ret = intel_pasid_alloc_id(svm,
svm 322 drivers/iommu/intel-svm.c kfree(svm);
svm 326 drivers/iommu/intel-svm.c svm->pasid = ret;
svm 327 drivers/iommu/intel-svm.c svm->notifier.ops = &intel_mmuops;
svm 328 drivers/iommu/intel-svm.c svm->mm = mm;
svm 329 drivers/iommu/intel-svm.c svm->flags = flags;
svm 330 drivers/iommu/intel-svm.c INIT_LIST_HEAD_RCU(&svm->devs);
svm 331 drivers/iommu/intel-svm.c INIT_LIST_HEAD(&svm->list);
svm 334 drivers/iommu/intel-svm.c ret = mmu_notifier_register(&svm->notifier, mm);
svm 336 drivers/iommu/intel-svm.c intel_pasid_free_id(svm->pasid);
svm 337 drivers/iommu/intel-svm.c kfree(svm);
svm 346 drivers/iommu/intel-svm.c svm->pasid, FLPT_DEFAULT_DID,
svm 351 drivers/iommu/intel-svm.c mmu_notifier_unregister(&svm->notifier, mm);
svm 352 drivers/iommu/intel-svm.c intel_pasid_free_id(svm->pasid);
svm 353 drivers/iommu/intel-svm.c kfree(svm);
svm 358 drivers/iommu/intel-svm.c list_add_tail(&svm->list, &global_svm_list);
svm 367 drivers/iommu/intel-svm.c svm->pasid, FLPT_DEFAULT_DID,
svm 375 drivers/iommu/intel-svm.c list_add_rcu(&sdev->list, &svm->devs);
svm 378 drivers/iommu/intel-svm.c *pasid = svm->pasid;
svm 392 drivers/iommu/intel-svm.c struct intel_svm *svm;
svm 400 drivers/iommu/intel-svm.c svm = intel_pasid_lookup_id(pasid);
svm 401 drivers/iommu/intel-svm.c if (!svm)
svm 404 drivers/iommu/intel-svm.c list_for_each_entry(sdev, &svm->devs, list) {
svm 417 drivers/iommu/intel-svm.c intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
svm 418 drivers/iommu/intel-svm.c intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
svm 421 drivers/iommu/intel-svm.c if (list_empty(&svm->devs)) {
svm 422 drivers/iommu/intel-svm.c intel_pasid_free_id(svm->pasid);
svm 423 drivers/iommu/intel-svm.c if (svm->mm)
svm 424 drivers/iommu/intel-svm.c mmu_notifier_unregister(&svm->notifier, svm->mm);
svm 426 drivers/iommu/intel-svm.c list_del(&svm->list);
svm 432 drivers/iommu/intel-svm.c memset(svm, 0x6b, sizeof(*svm));
svm 433 drivers/iommu/intel-svm.c kfree(svm);
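Lines 170 and 178 show the standard kernel idiom for callback state: struct intel_svm embeds the mmu_notifier, and the notifier callbacks recover the containing object with container_of(). The user-space sketch below re-implements that idiom (a local container_of over offsetof) around a made-up notifier struct, purely to show how the embedded member gets back to its parent; it does not use the real mmu_notifier API.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, as found in the kernel headers. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier {                      /* stand-in for struct mmu_notifier */
    void (*release)(struct notifier *n);
};

struct svm_ctx {                       /* stand-in for struct intel_svm */
    int pasid;
    struct notifier notifier;          /* embedded callback object */
};

static void svm_release(struct notifier *n)
{
    /* Recover the parent object from the embedded member. */
    struct svm_ctx *ctx = container_of(n, struct svm_ctx, notifier);

    printf("releasing PASID %d\n", ctx->pasid);
}

int main(void)
{
    struct svm_ctx ctx = { .pasid = 42, .notifier = { .release = svm_release } };

    ctx.notifier.release(&ctx.notifier);   /* callback sees only the notifier */
    return 0;
}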
svm 449 drivers/iommu/intel-svm.c struct intel_svm *svm;
svm 457 drivers/iommu/intel-svm.c svm = intel_pasid_lookup_id(pasid);
svm 458 drivers/iommu/intel-svm.c if (!svm)
svm 462 drivers/iommu/intel-svm.c if (!svm->mm)
svm 464 drivers/iommu/intel-svm.c else if (atomic_read(&svm->mm->mm_users) > 0)
svm 534 drivers/iommu/intel-svm.c struct intel_svm *svm = NULL;
svm 565 drivers/iommu/intel-svm.c if (!svm || svm->pasid != req->pasid) {
svm 567 drivers/iommu/intel-svm.c svm = intel_pasid_lookup_id(req->pasid);
svm 573 drivers/iommu/intel-svm.c if (!svm) {
svm 584 drivers/iommu/intel-svm.c if (!svm->mm)
svm 592 drivers/iommu/intel-svm.c if (!mmget_not_zero(svm->mm))
svm 595 drivers/iommu/intel-svm.c down_read(&svm->mm->mmap_sem);
svm 596 drivers/iommu/intel-svm.c vma = find_extend_vma(svm->mm, address);
svm 610 drivers/iommu/intel-svm.c up_read(&svm->mm->mmap_sem);
svm 611 drivers/iommu/intel-svm.c mmput(svm->mm);
svm 615 drivers/iommu/intel-svm.c list_for_each_entry_rcu(sdev, &svm->devs, list) {
svm 624 drivers/iommu/intel-svm.c if (WARN_ON(&sdev->list == &svm->devs))
svm 636 drivers/iommu/intel-svm.c svm = NULL;
svm 353 tools/testing/vsock/vsock_diag_test.c struct sockaddr_vm svm;
svm 355 tools/testing/vsock/vsock_diag_test.c .svm = {
svm 367 tools/testing/vsock/vsock_diag_test.c if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
svm 391 tools/testing/vsock/vsock_diag_test.c struct sockaddr_vm svm;
svm 393 tools/testing/vsock/vsock_diag_test.c .svm = {
svm 410 tools/testing/vsock/vsock_diag_test.c ret = connect(fd, &addr.sa, sizeof(addr.svm));
svm 437 tools/testing/vsock/vsock_diag_test.c struct sockaddr_vm svm;
svm 439 tools/testing/vsock/vsock_diag_test.c .svm = {
svm 447 tools/testing/vsock/vsock_diag_test.c struct sockaddr_vm svm;
svm 449 tools/testing/vsock/vsock_diag_test.c socklen_t clientaddr_len = sizeof(clientaddr.svm);
svm 457 tools/testing/vsock/vsock_diag_test.c if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
svm 485 tools/testing/vsock/vsock_diag_test.c if (clientaddr.svm.svm_cid != peer_cid) {
svm 487 tools/testing/vsock/vsock_diag_test.c peer_cid, clientaddr.svm.svm_cid);
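The vsock_diag_test entries at the end use the AF_VSOCK address family, where struct sockaddr_vm (from <linux/vm_sockets.h>) carries a context ID and a port instead of an IP address. A minimal stand-alone listener using that API is sketched below; the port number is an arbitrary example value.

#include <sys/socket.h>
#include <linux/vm_sockets.h>   /* struct sockaddr_vm, VMADDR_CID_ANY */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_vm addr;
    int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    memset(&addr, 0, sizeof(addr));
    addr.svm_family = AF_VSOCK;
    addr.svm_cid = VMADDR_CID_ANY;   /* accept from any context ID */
    addr.svm_port = 1234;            /* arbitrary example port     */

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
        listen(fd, 1) < 0) {
        perror("bind/listen");
        close(fd);
        return 1;
    }

    printf("listening on vsock port %u\n", addr.svm_port);
    close(fd);
    return 0;
}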