kvm                50 arch/arm/include/asm/kvm_asm.h struct kvm;
kvm                57 arch/arm/include/asm/kvm_asm.h extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
kvm                58 arch/arm/include/asm/kvm_asm.h extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
kvm               268 arch/arm/include/asm/kvm_host.h int kvm_unmap_hva_range(struct kvm *kvm,
kvm               270 arch/arm/include/asm/kvm_host.h int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               274 arch/arm/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
kvm               275 arch/arm/include/asm/kvm_host.h int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
kvm               279 arch/arm/include/asm/kvm_host.h void kvm_arm_halt_guest(struct kvm *kvm);
kvm               280 arch/arm/include/asm/kvm_host.h void kvm_arm_resume_guest(struct kvm *kvm);
kvm               318 arch/arm/include/asm/kvm_host.h static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm               326 arch/arm/include/asm/kvm_host.h void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
kvm               328 arch/arm/include/asm/kvm_host.h struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
kvm               332 arch/arm/include/asm/kvm_host.h static inline void kvm_arch_sync_events(struct kvm *kvm) {}
kvm               400 arch/arm/include/asm/kvm_host.h struct kvm *kvm_arch_alloc_vm(void);
kvm               401 arch/arm/include/asm/kvm_host.h void kvm_arch_free_vm(struct kvm *kvm);
kvm               403 arch/arm/include/asm/kvm_host.h static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
kvm                39 arch/arm/include/asm/kvm_mmu.h #define kvm_phys_shift(kvm)		KVM_PHYS_SHIFT
kvm                40 arch/arm/include/asm/kvm_mmu.h #define kvm_phys_size(kvm)		(1ULL << kvm_phys_shift(kvm))
kvm                41 arch/arm/include/asm/kvm_mmu.h #define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - 1ULL)
kvm                42 arch/arm/include/asm/kvm_mmu.h #define kvm_vttbr_baddr_mask(kvm)	VTTBR_BADDR_MASK
kvm                44 arch/arm/include/asm/kvm_mmu.h #define stage2_pgd_size(kvm)		(PTRS_PER_S2_PGD * sizeof(pgd_t))
kvm                54 arch/arm/include/asm/kvm_mmu.h void stage2_unmap_vm(struct kvm *kvm);
kvm                55 arch/arm/include/asm/kvm_mmu.h int kvm_alloc_stage2_pgd(struct kvm *kvm);
kvm                56 arch/arm/include/asm/kvm_mmu.h void kvm_free_stage2_pgd(struct kvm *kvm);
kvm                57 arch/arm/include/asm/kvm_mmu.h int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
kvm               194 arch/arm/include/asm/kvm_mmu.h #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
kvm               195 arch/arm/include/asm/kvm_mmu.h #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
kvm               196 arch/arm/include/asm/kvm_mmu.h #define kvm_pud_table_empty(kvm, pudp) false
kvm               202 arch/arm/include/asm/kvm_mmu.h struct kvm;
kvm               361 arch/arm/include/asm/kvm_mmu.h static inline int kvm_read_guest_lock(struct kvm *kvm,
kvm               364 arch/arm/include/asm/kvm_mmu.h 	int srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               365 arch/arm/include/asm/kvm_mmu.h 	int ret = kvm_read_guest(kvm, gpa, data, len);
kvm               367 arch/arm/include/asm/kvm_mmu.h 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               372 arch/arm/include/asm/kvm_mmu.h static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
kvm               375 arch/arm/include/asm/kvm_mmu.h 	int srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               376 arch/arm/include/asm/kvm_mmu.h 	int ret = kvm_write_guest(kvm, gpa, data, len);
kvm               378 arch/arm/include/asm/kvm_mmu.h 	srcu_read_unlock(&kvm->srcu, srcu_idx);
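
[editor's note] The kvm_mmu.h hits at lines 361-378 above belong to a pair of SRCU-guarded guest-access helpers; the index only shows lines containing the symbol, so the bodies read disjointedly. For context, a reconstruction of the read side (editor's sketch pieced together from the listed lines; the write side is identical apart from calling kvm_write_guest()):

        static inline int kvm_read_guest_lock(struct kvm *kvm,
                                              gpa_t gpa, void *data, unsigned long len)
        {
                /* Pin the memslot array for the duration of the access */
                int srcu_idx = srcu_read_lock(&kvm->srcu);
                int ret = kvm_read_guest(kvm, gpa, data, len);

                srcu_read_unlock(&kvm->srcu, srcu_idx);

                return ret;
        }
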
kvm               423 arch/arm/include/asm/kvm_mmu.h static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
kvm               425 arch/arm/include/asm/kvm_mmu.h 	struct kvm_vmid *vmid = &kvm->arch.vmid;
kvm               428 arch/arm/include/asm/kvm_mmu.h 	baddr = kvm->arch.pgd_phys;
kvm                17 arch/arm/include/asm/stage2_pgtable.h #define kvm_mmu_cache_min_pages(kvm)	2
kvm                19 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_none(kvm, pgd)		pgd_none(pgd)
kvm                20 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_clear(kvm, pgd)		pgd_clear(pgd)
kvm                21 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_present(kvm, pgd)		pgd_present(pgd)
kvm                22 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_populate(kvm, pgd, pud)	pgd_populate(NULL, pgd, pud)
kvm                23 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_offset(kvm, pgd, address)	pud_offset(pgd, address)
kvm                24 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_free(kvm, pud)		do { } while (0)
kvm                26 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_none(kvm, pud)		pud_none(pud)
kvm                27 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_clear(kvm, pud)		pud_clear(pud)
kvm                28 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_present(kvm, pud)		pud_present(pud)
kvm                29 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_populate(kvm, pud, pmd)	pud_populate(NULL, pud, pmd)
kvm                30 arch/arm/include/asm/stage2_pgtable.h #define stage2_pmd_offset(kvm, pud, address)	pmd_offset(pud, address)
kvm                31 arch/arm/include/asm/stage2_pgtable.h #define stage2_pmd_free(kvm, pmd)		free_page((unsigned long)pmd)
kvm                33 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_huge(kvm, pud)		pud_huge(pud)
kvm                37 arch/arm/include/asm/stage2_pgtable.h stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm                44 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_addr_end(kvm, addr, end)	(end)
kvm                47 arch/arm/include/asm/stage2_pgtable.h stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm                54 arch/arm/include/asm/stage2_pgtable.h #define stage2_pgd_index(kvm, addr)		pgd_index(addr)
kvm                56 arch/arm/include/asm/stage2_pgtable.h #define stage2_pte_table_empty(kvm, ptep)	kvm_page_empty(ptep)
kvm                57 arch/arm/include/asm/stage2_pgtable.h #define stage2_pmd_table_empty(kvm, pmdp)	kvm_page_empty(pmdp)
kvm                58 arch/arm/include/asm/stage2_pgtable.h #define stage2_pud_table_empty(kvm, pudp)	false
kvm                60 arch/arm/include/asm/stage2_pgtable.h static inline bool kvm_stage2_has_pud(struct kvm *kvm)
kvm                70 arch/arm/include/asm/stage2_pgtable.h static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
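
[editor's note] On 32-bit ARM the stage2 page-table geometry is fixed, so the two predicates listed at lines 60 and 70 compile down to constants; a reconstruction for context (editor's sketch, not verbatim source):

        static inline bool kvm_stage2_has_pud(struct kvm *kvm)
        {
                return false;   /* 32-bit host: the stage2 PUD level is folded */
        }

        static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
        {
                return true;    /* stage2 always has a PMD level here */
        }
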
kvm               155 arch/arm/kvm/coproc.c 	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
kvm               172 arch/arm/kvm/coproc.c 	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
kvm                20 arch/arm/kvm/guest.c #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
kvm                68 arch/arm/kvm/hyp/switch.c 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
kvm                69 arch/arm/kvm/hyp/switch.c 	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
kvm                27 arch/arm/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
kvm                32 arch/arm/kvm/hyp/tlb.c 	kvm = kern_hyp_va(kvm);
kvm                33 arch/arm/kvm/hyp/tlb.c 	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
kvm                43 arch/arm/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm                45 arch/arm/kvm/hyp/tlb.c 	__kvm_tlb_flush_vmid(kvm);
kvm                50 arch/arm/kvm/hyp/tlb.c 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
kvm                53 arch/arm/kvm/hyp/tlb.c 	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
kvm                 8 arch/arm/kvm/trace.h #define TRACE_SYSTEM kvm
kvm                52 arch/arm64/include/asm/kvm_asm.h struct kvm;
kvm                61 arch/arm64/include/asm/kvm_asm.h extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
kvm                62 arch/arm64/include/asm/kvm_asm.h extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
kvm                56 arch/arm64/include/asm/kvm_host.h int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
kvm               429 arch/arm64/include/asm/kvm_host.h int kvm_unmap_hva_range(struct kvm *kvm,
kvm               431 arch/arm64/include/asm/kvm_host.h int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               432 arch/arm64/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
kvm               433 arch/arm64/include/asm/kvm_host.h int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
kvm               437 arch/arm64/include/asm/kvm_host.h void kvm_arm_halt_guest(struct kvm *kvm);
kvm               438 arch/arm64/include/asm/kvm_host.h void kvm_arm_resume_guest(struct kvm *kvm);
kvm               473 arch/arm64/include/asm/kvm_host.h void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
kvm               485 arch/arm64/include/asm/kvm_host.h struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
kvm               548 arch/arm64/include/asm/kvm_host.h static inline void kvm_arch_sync_events(struct kvm *kvm) {}
kvm               671 arch/arm64/include/asm/kvm_host.h struct kvm *kvm_arch_alloc_vm(void);
kvm               672 arch/arm64/include/asm/kvm_host.h void kvm_arch_free_vm(struct kvm *kvm);
kvm               674 arch/arm64/include/asm/kvm_host.h int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
kvm                88 arch/arm64/include/asm/kvm_hyp.h static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
kvm                90 arch/arm64/include/asm/kvm_hyp.h 	write_sysreg(kvm->arch.vtcr, vtcr_el2);
kvm                91 arch/arm64/include/asm/kvm_hyp.h 	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
kvm               135 arch/arm64/include/asm/kvm_mmu.h #define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
kvm               136 arch/arm64/include/asm/kvm_mmu.h #define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
kvm               137 arch/arm64/include/asm/kvm_mmu.h #define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
kvm               155 arch/arm64/include/asm/kvm_mmu.h void stage2_unmap_vm(struct kvm *kvm);
kvm               156 arch/arm64/include/asm/kvm_mmu.h int kvm_alloc_stage2_pgd(struct kvm *kvm);
kvm               157 arch/arm64/include/asm/kvm_mmu.h void kvm_free_stage2_pgd(struct kvm *kvm);
kvm               158 arch/arm64/include/asm/kvm_mmu.h int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
kvm               301 arch/arm64/include/asm/kvm_mmu.h struct kvm;
kvm               426 arch/arm64/include/asm/kvm_mmu.h static inline int kvm_read_guest_lock(struct kvm *kvm,
kvm               429 arch/arm64/include/asm/kvm_mmu.h 	int srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               430 arch/arm64/include/asm/kvm_mmu.h 	int ret = kvm_read_guest(kvm, gpa, data, len);
kvm               432 arch/arm64/include/asm/kvm_mmu.h 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               437 arch/arm64/include/asm/kvm_mmu.h static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
kvm               440 arch/arm64/include/asm/kvm_mmu.h 	int srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               441 arch/arm64/include/asm/kvm_mmu.h 	int ret = kvm_write_guest(kvm, gpa, data, len);
kvm               443 arch/arm64/include/asm/kvm_mmu.h 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               590 arch/arm64/include/asm/kvm_mmu.h static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
kvm               592 arch/arm64/include/asm/kvm_mmu.h 	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
kvm               595 arch/arm64/include/asm/kvm_mmu.h static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
kvm               597 arch/arm64/include/asm/kvm_mmu.h 	struct kvm_vmid *vmid = &kvm->arch.vmid;
kvm               601 arch/arm64/include/asm/kvm_mmu.h 	baddr = kvm->arch.pgd_phys;
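
[editor's note] The kvm_get_vttbr() hits at lines 595-601 above compose the VTTBR_EL2 value from the per-VM stage2 PGD address and the current VMID; a reconstruction of the whole helper as it reads in contemporary trees (editor's sketch; only the listed lines are verbatim, the CnP handling via system_supports_cnp()/VTTBR_CNP_BIT is filled in from context):

        static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
        {
                struct kvm_vmid *vmid = &kvm->arch.vmid;
                u64 vmid_field, baddr;
                u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

                baddr = kvm->arch.pgd_phys;             /* stage2 PGD base */
                vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
                return baddr | vmid_field | cnp;
        }
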
kvm                32 arch/arm64/include/asm/stage2_pgtable.h #define kvm_stage2_levels(kvm)		VTCR_EL2_LVLS(kvm->arch.vtcr)
kvm                35 arch/arm64/include/asm/stage2_pgtable.h #define stage2_pgdir_shift(kvm)		pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
kvm                36 arch/arm64/include/asm/stage2_pgtable.h #define stage2_pgdir_size(kvm)		(1ULL << stage2_pgdir_shift(kvm))
kvm                37 arch/arm64/include/asm/stage2_pgtable.h #define stage2_pgdir_mask(kvm)		~(stage2_pgdir_size(kvm) - 1)
kvm                51 arch/arm64/include/asm/stage2_pgtable.h #define stage2_pgd_ptrs(kvm)		__s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
kvm                52 arch/arm64/include/asm/stage2_pgtable.h #define stage2_pgd_size(kvm)		__s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
kvm                59 arch/arm64/include/asm/stage2_pgtable.h #define kvm_mmu_cache_min_pages(kvm)	(kvm_stage2_levels(kvm) - 1)
kvm                62 arch/arm64/include/asm/stage2_pgtable.h static inline bool kvm_stage2_has_pud(struct kvm *kvm)
kvm                64 arch/arm64/include/asm/stage2_pgtable.h 	return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
kvm                71 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
kvm                73 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm                79 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
kvm                81 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm                85 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
kvm                87 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm                93 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
kvm                95 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm                99 arch/arm64/include/asm/stage2_pgtable.h static inline pud_t *stage2_pud_offset(struct kvm *kvm,
kvm               102 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm               108 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
kvm               110 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm               114 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
kvm               116 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm))
kvm               123 arch/arm64/include/asm/stage2_pgtable.h stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm               125 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pud(kvm)) {
kvm               135 arch/arm64/include/asm/stage2_pgtable.h static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
kvm               137 arch/arm64/include/asm/stage2_pgtable.h 	return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
kvm               144 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
kvm               146 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               152 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
kvm               154 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               158 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
kvm               160 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               166 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
kvm               168 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               172 arch/arm64/include/asm/stage2_pgtable.h static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
kvm               175 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               181 arch/arm64/include/asm/stage2_pgtable.h static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
kvm               183 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               187 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
kvm               189 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               195 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
kvm               197 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm))
kvm               204 arch/arm64/include/asm/stage2_pgtable.h stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm               206 arch/arm64/include/asm/stage2_pgtable.h 	if (kvm_stage2_has_pmd(kvm)) {
kvm               215 arch/arm64/include/asm/stage2_pgtable.h static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
kvm               220 arch/arm64/include/asm/stage2_pgtable.h static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
kvm               222 arch/arm64/include/asm/stage2_pgtable.h 	return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1));
kvm               226 arch/arm64/include/asm/stage2_pgtable.h stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm               228 arch/arm64/include/asm/stage2_pgtable.h 	phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
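
[editor's note] Unlike the 32-bit macros earlier, every arm64 stage2 accessor above follows one pattern: consult the per-VM table geometry (kvm_stage2_levels(), decoded from vtcr at line 32) and fall back to folded-level behaviour when the guest's IPA space does not need that level. A representative reconstruction (editor's sketch; the else-arm is elided by the index since it does not contain the symbol):

        static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
        {
                if (kvm_stage2_has_pud(kvm))
                        return pgd_none(pgd);   /* real PGD level */
                else
                        return 0;               /* folded: an entry is never "none" */
        }
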
kvm                33 arch/arm64/kvm/guest.c #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
kvm               217 arch/arm64/kvm/hyp/switch.c static void __hyp_text __activate_vm(struct kvm *kvm)
kvm               219 arch/arm64/kvm/hyp/switch.c 	__load_guest_stage2(kvm);
kvm               620 arch/arm64/kvm/hyp/switch.c 	__activate_vm(vcpu->kvm);
kvm               681 arch/arm64/kvm/hyp/switch.c 	__activate_vm(kern_hyp_va(vcpu->kvm));
kvm                19 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
kvm                56 arch/arm64/kvm/hyp/tlb.c 	__load_guest_stage2(kvm);
kvm                63 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
kvm                66 arch/arm64/kvm/hyp/tlb.c 	__load_guest_stage2(kvm);
kvm                70 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
kvm                74 arch/arm64/kvm/hyp/tlb.c 		__tlb_switch_to_guest_vhe(kvm, cxt);
kvm                76 arch/arm64/kvm/hyp/tlb.c 		__tlb_switch_to_guest_nvhe(kvm, cxt);
kvm                79 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
kvm                99 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
kvm               105 arch/arm64/kvm/hyp/tlb.c static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
kvm               109 arch/arm64/kvm/hyp/tlb.c 		__tlb_switch_to_host_vhe(kvm, cxt);
kvm               111 arch/arm64/kvm/hyp/tlb.c 		__tlb_switch_to_host_nvhe(kvm, cxt);
kvm               114 arch/arm64/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm               121 arch/arm64/kvm/hyp/tlb.c 	kvm = kern_hyp_va(kvm);
kvm               122 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_guest(kvm, &cxt);
kvm               165 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_host(kvm, &cxt);
kvm               168 arch/arm64/kvm/hyp/tlb.c void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
kvm               175 arch/arm64/kvm/hyp/tlb.c 	kvm = kern_hyp_va(kvm);
kvm               176 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_guest(kvm, &cxt);
kvm               182 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_host(kvm, &cxt);
kvm               187 arch/arm64/kvm/hyp/tlb.c 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
kvm               191 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_guest(kvm, &cxt);
kvm               197 arch/arm64/kvm/hyp/tlb.c 	__tlb_switch_to_host(kvm, &cxt);
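
[editor's note] __tlb_switch_to_guest()/__tlb_switch_to_host() at lines 70-111 above are thin dispatchers between the VHE and non-VHE variants; the selector itself is elided by the index. Reconstruction of the guest side (editor's sketch; the context argument's type is not visible in the hits — tlb_inv_context in contemporary trees):

        static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
                                                     struct tlb_inv_context *cxt)
        {
                if (has_vhe())
                        __tlb_switch_to_guest_vhe(kvm, cxt);
                else
                        __tlb_switch_to_guest_nvhe(kvm, cxt);
        }
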
kvm                37 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
kvm                38 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c 	struct vgic_dist *vgic = &kvm->arch.vgic;
kvm                63 arch/arm64/kvm/reset.c int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm               391 arch/arm64/kvm/reset.c int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
kvm               435 arch/arm64/kvm/reset.c 	kvm->arch.vtcr = vtcr;
kvm                 9 arch/arm64/kvm/trace.h #define TRACE_SYSTEM kvm
kvm               788 arch/mips/include/asm/kvm_host.h 	int (*check_extension)(struct kvm *kvm, long ext);
kvm               792 arch/mips/include/asm/kvm_host.h 	void (*flush_shadow_all)(struct kvm *kvm);
kvm               798 arch/mips/include/asm/kvm_host.h 	void (*flush_shadow_memslot)(struct kvm *kvm,
kvm               919 arch/mips/include/asm/kvm_host.h bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
kvm               920 arch/mips/include/asm/kvm_host.h int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
kvm               941 arch/mips/include/asm/kvm_host.h int kvm_unmap_hva_range(struct kvm *kvm,
kvm               943 arch/mips/include/asm/kvm_host.h int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               944 arch/mips/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
kvm               945 arch/mips/include/asm/kvm_host.h int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
kvm              1135 arch/mips/include/asm/kvm_host.h extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
kvm              1138 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_sync_events(struct kvm *kvm) {}
kvm              1139 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_free_memslot(struct kvm *kvm,
kvm              1141 arch/mips/include/asm/kvm_host.h static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
kvm               321 arch/mips/kvm/entry.c 	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
kvm               323 arch/mips/kvm/entry.c 	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
kvm               366 arch/mips/kvm/entry.c 		     offsetof(struct kvm, arch.gpa_mm.context.asid));
kvm               131 arch/mips/kvm/mips.c int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm               146 arch/mips/kvm/mips.c 	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
kvm               147 arch/mips/kvm/mips.c 	if (!kvm->arch.gpa_mm.pgd)
kvm               153 arch/mips/kvm/mips.c void kvm_mips_free_vcpus(struct kvm *kvm)
kvm               158 arch/mips/kvm/mips.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               162 arch/mips/kvm/mips.c 	mutex_lock(&kvm->lock);
kvm               164 arch/mips/kvm/mips.c 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm               165 arch/mips/kvm/mips.c 		kvm->vcpus[i] = NULL;
kvm               167 arch/mips/kvm/mips.c 	atomic_set(&kvm->online_vcpus, 0);
kvm               169 arch/mips/kvm/mips.c 	mutex_unlock(&kvm->lock);
kvm               172 arch/mips/kvm/mips.c static void kvm_mips_free_gpa_pt(struct kvm *kvm)
kvm               175 arch/mips/kvm/mips.c 	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
kvm               176 arch/mips/kvm/mips.c 	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
kvm               179 arch/mips/kvm/mips.c void kvm_arch_destroy_vm(struct kvm *kvm)
kvm               181 arch/mips/kvm/mips.c 	kvm_mips_free_vcpus(kvm);
kvm               182 arch/mips/kvm/mips.c 	kvm_mips_free_gpa_pt(kvm);
kvm               191 arch/mips/kvm/mips.c int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm               197 arch/mips/kvm/mips.c void kvm_arch_flush_shadow_all(struct kvm *kvm)
kvm               200 arch/mips/kvm/mips.c 	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
kvm               203 arch/mips/kvm/mips.c 	kvm_mips_callbacks->flush_shadow_all(kvm);
kvm               206 arch/mips/kvm/mips.c void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm               214 arch/mips/kvm/mips.c 	spin_lock(&kvm->mmu_lock);
kvm               216 arch/mips/kvm/mips.c 	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
kvm               219 arch/mips/kvm/mips.c 	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
kvm               220 arch/mips/kvm/mips.c 	spin_unlock(&kvm->mmu_lock);
kvm               223 arch/mips/kvm/mips.c int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm               231 arch/mips/kvm/mips.c void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm               240 arch/mips/kvm/mips.c 		  __func__, kvm, mem->slot, mem->guest_phys_addr,
kvm               255 arch/mips/kvm/mips.c 		spin_lock(&kvm->mmu_lock);
kvm               257 arch/mips/kvm/mips.c 		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
kvm               261 arch/mips/kvm/mips.c 			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
kvm               262 arch/mips/kvm/mips.c 		spin_unlock(&kvm->mmu_lock);
kvm               283 arch/mips/kvm/mips.c struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm               296 arch/mips/kvm/mips.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm               301 arch/mips/kvm/mips.c 	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
kvm               492 arch/mips/kvm/mips.c 		dvcpu = vcpu->kvm->vcpus[irq->cpu];
kvm               874 arch/mips/kvm/mips.c 	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
kvm               993 arch/mips/kvm/mips.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
kvm              1000 arch/mips/kvm/mips.c 	mutex_lock(&kvm->slots_lock);
kvm              1002 arch/mips/kvm/mips.c 	r = kvm_get_dirty_log_protect(kvm, log, &flush);
kvm              1005 arch/mips/kvm/mips.c 		slots = kvm_memslots(kvm);
kvm              1009 arch/mips/kvm/mips.c 		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
kvm              1012 arch/mips/kvm/mips.c 	mutex_unlock(&kvm->slots_lock);
kvm              1016 arch/mips/kvm/mips.c int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
kvm              1023 arch/mips/kvm/mips.c 	mutex_lock(&kvm->slots_lock);
kvm              1025 arch/mips/kvm/mips.c 	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
kvm              1028 arch/mips/kvm/mips.c 		slots = kvm_memslots(kvm);
kvm              1032 arch/mips/kvm/mips.c 		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
kvm              1035 arch/mips/kvm/mips.c 	mutex_unlock(&kvm->slots_lock);
kvm              1097 arch/mips/kvm/mips.c int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm              1138 arch/mips/kvm/mips.c 		r = kvm_mips_callbacks->check_extension(kvm, ext);
kvm               173 arch/mips/kvm/mmu.c static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
kvm               177 arch/mips/kvm/mmu.c 	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
kvm               299 arch/mips/kvm/mmu.c bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
kvm               301 arch/mips/kvm/mmu.c 	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
kvm               423 arch/mips/kvm/mmu.c int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
kvm               425 arch/mips/kvm/mmu.c 	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
kvm               441 arch/mips/kvm/mmu.c void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm               449 arch/mips/kvm/mmu.c 	kvm_mips_mkclean_gpa_pt(kvm, start, end);
kvm               460 arch/mips/kvm/mmu.c static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
kvm               463 arch/mips/kvm/mmu.c 	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
kvm               468 arch/mips/kvm/mmu.c static int handle_hva_to_gpa(struct kvm *kvm,
kvm               471 arch/mips/kvm/mmu.c 			     int (*handler)(struct kvm *kvm, gfn_t gfn,
kvm               481 arch/mips/kvm/mmu.c 	slots = kvm_memslots(kvm);
kvm               501 arch/mips/kvm/mmu.c 		ret |= handler(kvm, gfn, gfn_end, memslot, data);
kvm               508 arch/mips/kvm/mmu.c static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
kvm               511 arch/mips/kvm/mmu.c 	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
kvm               515 arch/mips/kvm/mmu.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               517 arch/mips/kvm/mmu.c 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
kvm               519 arch/mips/kvm/mmu.c 	kvm_mips_callbacks->flush_shadow_all(kvm);
kvm               523 arch/mips/kvm/mmu.c static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
kvm               528 arch/mips/kvm/mmu.c 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
kvm               554 arch/mips/kvm/mmu.c int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm               559 arch/mips/kvm/mmu.c 	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
kvm               561 arch/mips/kvm/mmu.c 		kvm_mips_callbacks->flush_shadow_all(kvm);
kvm               565 arch/mips/kvm/mmu.c static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
kvm               568 arch/mips/kvm/mmu.c 	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
kvm               571 arch/mips/kvm/mmu.c static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
kvm               575 arch/mips/kvm/mmu.c 	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
kvm               582 arch/mips/kvm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               584 arch/mips/kvm/mmu.c 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
kvm               587 arch/mips/kvm/mmu.c int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
kvm               589 arch/mips/kvm/mmu.c 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
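
[editor's note] handle_hva_to_gpa() at lines 468-501 above is the worker behind all four MIPS MMU-notifier entry points: it clips the HVA range against each memslot, converts to GFNs, and ORs together the per-slot handler results. A reconstruction mirroring the ARM implementation of the same helper (editor's sketch, not verbatim source):

        static int handle_hva_to_gpa(struct kvm *kvm,
                                     unsigned long start, unsigned long end,
                                     int (*handler)(struct kvm *kvm, gfn_t gfn,
                                                    gfn_t gfn_end,
                                                    struct kvm_memory_slot *memslot,
                                                    void *data),
                                     void *data)
        {
                struct kvm_memslots *slots = kvm_memslots(kvm);
                struct kvm_memory_slot *memslot;
                int ret = 0;

                kvm_for_each_memslot(memslot, slots) {
                        unsigned long hva_start, hva_end;
                        gfn_t gfn, gfn_end;

                        /* Clip [start, end) against this slot's HVA window */
                        hva_start = max(start, memslot->userspace_addr);
                        hva_end = min(end, memslot->userspace_addr +
                                           (memslot->npages << PAGE_SHIFT));
                        if (hva_start >= hva_end)
                                continue;

                        gfn = hva_to_gfn_memslot(hva_start, memslot);
                        gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                        ret |= handler(kvm, gfn, gfn_end, memslot, data);
                }

                return ret;
        }
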
kvm               614 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               621 arch/mips/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm               624 arch/mips/kvm/mmu.c 	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
kvm               646 arch/mips/kvm/mmu.c 		mark_page_dirty(kvm, gfn);
kvm               656 arch/mips/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm               690 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               701 arch/mips/kvm/mmu.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               718 arch/mips/kvm/mmu.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               733 arch/mips/kvm/mmu.c 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
kvm               739 arch/mips/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm               741 arch/mips/kvm/mmu.c 	if (mmu_notifier_retry(kvm, mmu_seq)) {
kvm               747 arch/mips/kvm/mmu.c 		spin_unlock(&kvm->mmu_lock);
kvm               753 arch/mips/kvm/mmu.c 	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
kvm               761 arch/mips/kvm/mmu.c 			mark_page_dirty(kvm, gfn);
kvm               777 arch/mips/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm               781 arch/mips/kvm/mmu.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1045 arch/mips/kvm/mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm              1069 arch/mips/kvm/mmu.c 		spin_lock(&kvm->mmu_lock);
kvm              1070 arch/mips/kvm/mmu.c 		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
kvm              1074 arch/mips/kvm/mmu.c 		spin_unlock(&kvm->mmu_lock);
kvm                46 arch/mips/kvm/tlb.c 	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;
kvm                16 arch/mips/kvm/trace.h #define TRACE_SYSTEM kvm
kvm               524 arch/mips/kvm/trap_emul.c static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
kvm               698 arch/mips/kvm/trap_emul.c static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
kvm               701 arch/mips/kvm/trap_emul.c 	kvm_flush_remote_tlbs(kvm);
kvm               704 arch/mips/kvm/trap_emul.c static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
kvm               707 arch/mips/kvm/trap_emul.c 	kvm_trap_emul_flush_shadow_all(kvm);
kvm              2405 arch/mips/kvm/vz.c 	struct kvm *kvm = vcpu->kvm;
kvm              2406 arch/mips/kvm/vz.c 	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
kvm              2457 arch/mips/kvm/vz.c 		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
kvm              2915 arch/mips/kvm/vz.c static int kvm_vz_check_extension(struct kvm *kvm, long ext)
kvm              3106 arch/mips/kvm/vz.c static void kvm_vz_flush_shadow_all(struct kvm *kvm)
kvm              3110 arch/mips/kvm/vz.c 		kvm_flush_remote_tlbs(kvm);
kvm              3121 arch/mips/kvm/vz.c 		cpumask_setall(&kvm->arch.asid_flush_mask);
kvm              3122 arch/mips/kvm/vz.c 		kvm_flush_remote_tlbs(kvm);
kvm              3126 arch/mips/kvm/vz.c static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
kvm              3129 arch/mips/kvm/vz.c 	kvm_vz_flush_shadow_all(kvm);
kvm                86 arch/powerpc/include/asm/kvm_book3s.h 	struct kvm *kvm;
kvm               161 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
kvm               175 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
kvm               195 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
kvm               197 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
kvm               201 arch/powerpc/include/asm/kvm_book3s.h extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
kvm               209 arch/powerpc/include/asm/kvm_book3s.h extern int kvmppc_init_vm_radix(struct kvm *kvm);
kvm               210 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_free_radix(struct kvm *kvm);
kvm               211 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
kvm               215 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               217 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               219 arch/powerpc/include/asm/kvm_book3s.h extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               221 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
kvm               223 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
kvm               225 arch/powerpc/include/asm/kvm_book3s.h extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
kvm               240 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
kvm               244 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
kvm               246 arch/powerpc/include/asm/kvm_book3s.h void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
kvm               248 arch/powerpc/include/asm/kvm_book3s.h extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
kvm               250 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
kvm               252 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
kvm               255 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
kvm               258 arch/powerpc/include/asm/kvm_book3s.h extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
kvm               263 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
kvm               276 arch/powerpc/include/asm/kvm_book3s.h extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
kvm               296 arch/powerpc/include/asm/kvm_book3s.h void kvmhv_vm_nested_init(struct kvm *kvm);
kvm               300 arch/powerpc/include/asm/kvm_book3s.h void kvmhv_release_all_nested(struct kvm *kvm);
kvm               404 arch/powerpc/include/asm/kvm_book3s.h static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
kvm               408 arch/powerpc/include/asm/kvm_book3s.h 	return !is_kvmppc_hv_enabled(vcpu->kvm);
kvm               458 arch/powerpc/include/asm/kvm_book3s.h static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
kvm               461 arch/powerpc/include/asm/kvm_book3s.h 	int stride = kvm->arch.emul_smt_mode;
kvm                35 arch/powerpc/include/asm/kvm_book3s_64.h 	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
kvm               110 arch/powerpc/include/asm/kvm_book3s_64.h struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
kvm               113 arch/powerpc/include/asm/kvm_book3s_64.h int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
kvm               138 arch/powerpc/include/asm/kvm_book3s_64.h static inline bool kvm_is_radix(struct kvm *kvm)
kvm               140 arch/powerpc/include/asm/kvm_book3s_64.h 	return kvm->arch.radix;
kvm               150 arch/powerpc/include/asm/kvm_book3s_64.h 		radix = kvm_is_radix(vcpu->kvm);
kvm               523 arch/powerpc/include/asm/kvm_book3s_64.h static inline void note_hpte_modification(struct kvm *kvm,
kvm               526 arch/powerpc/include/asm/kvm_book3s_64.h 	if (atomic_read(&kvm->arch.hpte_mod_interest))
kvm               536 arch/powerpc/include/asm/kvm_book3s_64.h static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
kvm               538 arch/powerpc/include/asm/kvm_book3s_64.h 	return rcu_dereference_raw_check(kvm->memslots[0]);
kvm               541 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
kvm               542 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
kvm               623 arch/powerpc/include/asm/kvm_book3s_64.h extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
kvm               627 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
kvm               629 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
kvm               632 arch/powerpc/include/asm/kvm_book3s_64.h extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
kvm                60 arch/powerpc/include/asm/kvm_host.h extern int kvm_unmap_hva_range(struct kvm *kvm,
kvm                62 arch/powerpc/include/asm/kvm_host.h extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
kvm                63 arch/powerpc/include/asm/kvm_host.h extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
kvm                64 arch/powerpc/include/asm/kvm_host.h extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               197 arch/powerpc/include/asm/kvm_host.h 	struct kvm *kvm;
kvm               857 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_sync_events(struct kvm *kvm) {}
kvm               858 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
kvm               859 arch/powerpc/include/asm/kvm_host.h static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
kvm               122 arch/powerpc/include/asm/kvm_ppc.h extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
kvm               164 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
kvm               165 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
kvm               167 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_rmap_reset(struct kvm *kvm);
kvm               168 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_prepare_vrma(struct kvm *kvm,
kvm               173 arch/powerpc/include/asm/kvm_ppc.h extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
kvm               175 arch/powerpc/include/asm/kvm_ppc.h extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
kvm               177 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
kvm               178 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
kvm               179 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_setup_partition_table(struct kvm *kvm);
kvm               181 arch/powerpc/include/asm/kvm_ppc.h extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
kvm               184 arch/powerpc/include/asm/kvm_ppc.h 		struct kvm *kvm, unsigned long liobn);
kvm               201 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_init_vm(struct kvm *kvm);
kvm               202 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_destroy_vm(struct kvm *kvm);
kvm               203 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_free_memslot(struct kvm *kvm,
kvm               206 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_create_memslot(struct kvm *kvm,
kvm               209 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
kvm               212 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm               217 arch/powerpc/include/asm/kvm_ppc.h extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
kvm               219 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_core_flush_memslot(struct kvm *kvm,
kvm               227 arch/powerpc/include/asm/kvm_ppc.h extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
kvm               228 arch/powerpc/include/asm/kvm_ppc.h extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
kvm               230 arch/powerpc/include/asm/kvm_ppc.h extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
kvm               235 arch/powerpc/include/asm/kvm_ppc.h extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
kvm               237 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
kvm               239 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
kvm               241 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
kvm               243 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
kvm               244 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
kvm               276 arch/powerpc/include/asm/kvm_ppc.h 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
kvm               279 arch/powerpc/include/asm/kvm_ppc.h 	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
kvm               280 arch/powerpc/include/asm/kvm_ppc.h 	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
kvm               281 arch/powerpc/include/asm/kvm_ppc.h 	int (*prepare_memory_region)(struct kvm *kvm,
kvm               284 arch/powerpc/include/asm/kvm_ppc.h 	void (*commit_memory_region)(struct kvm *kvm,
kvm               289 arch/powerpc/include/asm/kvm_ppc.h 	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
kvm               291 arch/powerpc/include/asm/kvm_ppc.h 	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
kvm               292 arch/powerpc/include/asm/kvm_ppc.h 	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
kvm               293 arch/powerpc/include/asm/kvm_ppc.h 	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               299 arch/powerpc/include/asm/kvm_ppc.h 	int (*init_vm)(struct kvm *kvm);
kvm               300 arch/powerpc/include/asm/kvm_ppc.h 	void (*destroy_vm)(struct kvm *kvm);
kvm               301 arch/powerpc/include/asm/kvm_ppc.h 	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
kvm               314 arch/powerpc/include/asm/kvm_ppc.h 	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
kvm               315 arch/powerpc/include/asm/kvm_ppc.h 	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
kvm               316 arch/powerpc/include/asm/kvm_ppc.h 	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
kvm               319 arch/powerpc/include/asm/kvm_ppc.h 	int (*enable_nested)(struct kvm *kvm);
kvm               352 arch/powerpc/include/asm/kvm_ppc.h static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
kvm               354 arch/powerpc/include/asm/kvm_ppc.h 	return kvm->arch.kvm_ops == kvmppc_hv_ops;
kvm               553 arch/powerpc/include/asm/kvm_ppc.h 	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
kvm               560 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
kvm               602 arch/powerpc/include/asm/kvm_ppc.h 				struct kvm *kvm)
kvm               604 arch/powerpc/include/asm/kvm_ppc.h 	if (kvm && kvm_irq_bypass)
kvm               605 arch/powerpc/include/asm/kvm_ppc.h 		return kvm->arch.pimap;
kvm               611 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_free_pimap(struct kvm *kvm);
kvm               620 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               622 arch/powerpc/include/asm/kvm_ppc.h extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               629 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
kvm               635 arch/powerpc/include/asm/kvm_ppc.h 				struct kvm *kvm)
kvm               639 arch/powerpc/include/asm/kvm_ppc.h static inline void kvmppc_free_pimap(struct kvm *kvm) {};
kvm               658 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
kvm               660 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
kvm               662 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
kvm               663 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
kvm               670 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               672 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               677 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
kvm               698 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
kvm               700 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
kvm               702 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
kvm               703 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
kvm               710 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               712 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               717 arch/powerpc/include/asm/kvm_ppc.h static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
kvm               494 arch/powerpc/kernel/asm-offsets.c 	OFFSET(VCPU_KVM, kvm_vcpu, kvm);
kvm               495 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_LPID, kvm, arch.lpid);
kvm               499 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_TLB_SETS, kvm, arch.tlb_sets);
kvm               500 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_SDR1, kvm, arch.sdr1);
kvm               501 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
kvm               502 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
kvm               503 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
kvm               504 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
kvm               505 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
kvm               506 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
kvm               507 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_RADIX, kvm, arch.radix);
kvm               508 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
kvm               509 arch/powerpc/kernel/asm-offsets.c 	OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
kvm               574 arch/powerpc/kernel/asm-offsets.c 	OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
kvm                39 arch/powerpc/kvm/book3s.c #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
kvm                93 arch/powerpc/kvm/book3s.c 	if (!is_kvmppc_hv_enabled(vcpu->kvm))
kvm               101 arch/powerpc/kvm/book3s.c 	if (is_kvmppc_hv_enabled(vcpu->kvm))
kvm               115 arch/powerpc/kvm/book3s.c 	if (is_kvmppc_hv_enabled(vcpu->kvm))
kvm               448 arch/powerpc/kvm/book3s.c 	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
kvm               519 arch/powerpc/kvm/book3s.c 	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
kvm               531 arch/powerpc/kvm/book3s.c 	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
kvm               608 arch/powerpc/kvm/book3s.c 	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
kvm               696 arch/powerpc/kvm/book3s.c 	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
kvm               777 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
kvm               782 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
kvm               787 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
kvm               793 arch/powerpc/kvm/book3s.c 	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
kvm               817 arch/powerpc/kvm/book3s.c struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
kvm               819 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
kvm               824 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
kvm               829 arch/powerpc/kvm/book3s.c 	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
kvm               832 arch/powerpc/kvm/book3s.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
kvm               834 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
kvm               837 arch/powerpc/kvm/book3s.c void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm               840 arch/powerpc/kvm/book3s.c 	kvm->arch.kvm_ops->free_memslot(free, dont);
kvm               843 arch/powerpc/kvm/book3s.c int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm               846 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->create_memslot(slot, npages);
kvm               849 arch/powerpc/kvm/book3s.c void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
kvm               851 arch/powerpc/kvm/book3s.c 	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
kvm               854 arch/powerpc/kvm/book3s.c int kvmppc_core_prepare_memory_region(struct kvm *kvm,
kvm               858 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
kvm               861 arch/powerpc/kvm/book3s.c void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm               867 arch/powerpc/kvm/book3s.c 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
kvm               870 arch/powerpc/kvm/book3s.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               872 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
kvm               875 arch/powerpc/kvm/book3s.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               877 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
kvm               880 arch/powerpc/kvm/book3s.c int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
kvm               882 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
kvm               885 arch/powerpc/kvm/book3s.c int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm               887 arch/powerpc/kvm/book3s.c 	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
kvm               893 arch/powerpc/kvm/book3s.c 	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
kvm               896 arch/powerpc/kvm/book3s.c int kvmppc_core_init_vm(struct kvm *kvm)
kvm               900 arch/powerpc/kvm/book3s.c 	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
kvm               901 arch/powerpc/kvm/book3s.c 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
kvm               902 arch/powerpc/kvm/book3s.c 	mutex_init(&kvm->arch.rtas_token_lock);
kvm               905 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->init_vm(kvm);
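
[editor's note] Nearly every book3s.c hit above is a one-line trampoline through the per-VM ops table chosen at VM creation (HV vs PR; cf. is_kvmppc_hv_enabled() at kvm_ppc.h line 352, which just compares kvm->arch.kvm_ops against kvmppc_hv_ops). kvmppc_core_init_vm() at lines 896-905 is typical; reconstruction with the elided preprocessor guard filled in (editor's sketch):

        int kvmppc_core_init_vm(struct kvm *kvm)
        {
        #ifdef CONFIG_PPC64
                /* Book3S-64 only: sPAPR TCE tables and RTAS token state */
                INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
                INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
                mutex_init(&kvm->arch.rtas_token_lock);
        #endif

                /* Dispatch to the HV or PR implementation */
                return kvm->arch.kvm_ops->init_vm(kvm);
        }
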
kvm               908 arch/powerpc/kvm/book3s.c void kvmppc_core_destroy_vm(struct kvm *kvm)
kvm               910 arch/powerpc/kvm/book3s.c 	kvm->arch.kvm_ops->destroy_vm(kvm);
kvm               913 arch/powerpc/kvm/book3s.c 	kvmppc_rtas_tokens_free(kvm);
kvm               914 arch/powerpc/kvm/book3s.c 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
kvm               922 arch/powerpc/kvm/book3s.c 	kfree(kvm->arch.xive_devices.native);
kvm               923 arch/powerpc/kvm/book3s.c 	kvm->arch.xive_devices.native = NULL;
kvm               924 arch/powerpc/kvm/book3s.c 	kfree(kvm->arch.xive_devices.xics_on_xive);
kvm               925 arch/powerpc/kvm/book3s.c 	kvm->arch.xive_devices.xics_on_xive = NULL;
kvm               940 arch/powerpc/kvm/book3s.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               942 arch/powerpc/kvm/book3s.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              1001 arch/powerpc/kvm/book3s.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1003 arch/powerpc/kvm/book3s.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              1021 arch/powerpc/kvm/book3s.c int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
kvm              1023 arch/powerpc/kvm/book3s.c 	return kvm->arch.kvm_ops->hcall_implemented(hcall);
kvm              1027 arch/powerpc/kvm/book3s.c int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
kvm              1031 arch/powerpc/kvm/book3s.c 		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
kvm              1034 arch/powerpc/kvm/book3s.c 		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
kvm              1039 arch/powerpc/kvm/book3s.c 			      struct kvm *kvm, int irq_source_id,
kvm              1042 arch/powerpc/kvm/book3s.c 	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
kvm              1046 arch/powerpc/kvm/book3s.c 				 struct kvm *kvm, int irq_source_id, int level,
kvm              1049 arch/powerpc/kvm/book3s.c 	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
kvm              1052 arch/powerpc/kvm/book3s.c int kvm_irq_map_gsi(struct kvm *kvm,
kvm              1063 arch/powerpc/kvm/book3s.c int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
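
The book3s.c entries above never touch the MMU themselves: every hook (prepare/commit_memory_region, unmap_hva_range, age_hva, set_spte_hva, init_vm, destroy_vm) is forwarded through the kvm->arch.kvm_ops table so the HV and PR backends can coexist behind one set of entry points. A minimal user-space C model of that indirection follows; struct vm, vm_ops and hv_unmap are illustrative stand-ins, not kernel names.

    #include <stdio.h>

    struct vm;

    /* One table of function pointers per backend, like kvm->arch.kvm_ops. */
    struct vm_ops {
    	int (*unmap_hva_range)(struct vm *vm, unsigned long start,
    			       unsigned long end);
    };

    struct vm {
    	const struct vm_ops *ops;
    };

    static int hv_unmap(struct vm *vm, unsigned long start, unsigned long end)
    {
    	(void)vm;
    	printf("HV backend: unmap [%#lx, %#lx)\n", start, end);
    	return 0;
    }

    static const struct vm_ops hv_ops = { .unmap_hva_range = hv_unmap };

    /* The common entry point only forwards, as the book3s.c wrappers do. */
    static int vm_unmap_hva_range(struct vm *vm, unsigned long start,
    			      unsigned long end)
    {
    	return vm->ops->unmap_hva_range(vm, start, end);
    }

    int main(void)
    {
    	struct vm vm = { .ops = &hv_ops };
    	return vm_unmap_hva_range(&vm, 0x1000, 0x2000);
    }
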
kvm                10 arch/powerpc/kvm/book3s.h extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
kvm                12 arch/powerpc/kvm/book3s.h extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
kvm                14 arch/powerpc/kvm/book3s.h extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
kvm                16 arch/powerpc/kvm/book3s.h extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
kvm                17 arch/powerpc/kvm/book3s.h extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm               120 arch/powerpc/kvm/book3s_32_mmu.c 	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
kvm               349 arch/powerpc/kvm/book3s_32_mmu.c 	kvm_for_each_vcpu(i, v, vcpu->kvm)
kvm               200 arch/powerpc/kvm/book3s_32_mmu_host.c 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
kvm               162 arch/powerpc/kvm/book3s_64_mmu.c 		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
kvm               258 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
kvm               363 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm               370 arch/powerpc/kvm/book3s_64_mmu.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm               573 arch/powerpc/kvm/book3s_64_mmu.c 	kvm_for_each_vcpu(i, v, vcpu->kvm)
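
The hpt_mutex pair in the book3s_64_mmu.c hits above (one lock site, two unlock sites) is the usual take-at-entry, release-on-every-exit discipline, normally spelled with a goto to a single unlock label. A small user-space sketch of that shape, with pthreads standing in for the kernel mutex; table_op and its failure flag are invented for illustration.

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t hpt_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical table operation: lock once, leave through one exit. */
    static int table_op(int want_fail)
    {
    	int ret = 0;

    	pthread_mutex_lock(&hpt_mutex);
    	if (want_fail) {
    		ret = -ENOENT;		/* error path still unlocks below */
    		goto out;
    	}
    	/* ... modify the hashed page table under the lock ... */
    out:
    	pthread_mutex_unlock(&hpt_mutex);
    	return ret;
    }

    int main(void)
    {
    	return table_op(0);
    }
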
kvm                86 arch/powerpc/kvm/book3s_64_mmu_host.c 	struct kvm *kvm = vcpu->kvm;
kvm                92 arch/powerpc/kvm/book3s_64_mmu_host.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               127 arch/powerpc/kvm/book3s_64_mmu_host.c 		mark_page_dirty(vcpu->kvm, gfn);
kvm               151 arch/powerpc/kvm/book3s_64_mmu_host.c 	spin_lock(&kvm->mmu_lock);
kvm               152 arch/powerpc/kvm/book3s_64_mmu_host.c 	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
kvm               203 arch/powerpc/kvm/book3s_64_mmu_host.c 	spin_unlock(&kvm->mmu_lock);
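
book3s_64_mmu_host.c samples kvm->mmu_notifier_seq before doing the page lookup without locks, then re-checks it with mmu_notifier_retry() under kvm->mmu_lock before committing the new HPTE, throwing the work away if an MMU-notifier invalidation ran in between. A user-space model of that sample/recheck pattern (the real check also accounts for an in-progress invalidation count, which this sketch omits):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_ulong mmu_seq;		/* bumped by each invalidation */
    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

    /* True if an invalidation has run since 'seq' was sampled. */
    static int notifier_retry(unsigned long seq)
    {
    	return atomic_load(&mmu_seq) != seq;
    }

    static void insert_translation(void)
    {
    	for (;;) {
    		unsigned long seq = atomic_load(&mmu_seq);

    		/* ... slow page lookup, done without the lock ... */

    		pthread_mutex_lock(&mmu_lock);
    		if (!notifier_retry(seq)) {
    			/* ... commit the new translation ... */
    			pthread_mutex_unlock(&mmu_lock);
    			return;
    		}
    		pthread_mutex_unlock(&mmu_lock);	/* raced: redo */
    	}
    }

    int main(void)
    {
    	insert_translation();
    	return 0;
    }
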
kvm                45 arch/powerpc/kvm/book3s_64_mmu_hv.c static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
kvm                51 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm;
kvm               116 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
kvm               118 arch/powerpc/kvm/book3s_64_mmu_hv.c 	atomic64_set(&kvm->arch.mmio_update, 0);
kvm               119 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.hpt = *info;
kvm               120 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
kvm               123 arch/powerpc/kvm/book3s_64_mmu_hv.c 		 info->virt, (long)info->order, kvm->arch.lpid);
kvm               126 arch/powerpc/kvm/book3s_64_mmu_hv.c long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
kvm               131 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm               132 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.mmu_ready) {
kvm               133 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvm->arch.mmu_ready = 0;
kvm               136 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (atomic_read(&kvm->arch.vcpus_running)) {
kvm               137 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvm->arch.mmu_ready = 1;
kvm               141 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm)) {
kvm               142 arch/powerpc/kvm/book3s_64_mmu_hv.c 		err = kvmppc_switch_mmu_to_hpt(kvm);
kvm               147 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.hpt.order == order) {
kvm               151 arch/powerpc/kvm/book3s_64_mmu_hv.c 		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
kvm               155 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_rmap_reset(kvm);
kvm               160 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.hpt.virt) {
kvm               161 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_free_hpt(&kvm->arch.hpt);
kvm               162 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_rmap_reset(kvm);
kvm               168 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvmppc_set_hpt(kvm, &info);
kvm               173 arch/powerpc/kvm/book3s_64_mmu_hv.c 		cpumask_setall(&kvm->arch.need_tlb_flush);
kvm               175 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm               215 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm               224 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
kvm               225 arch/powerpc/kvm/book3s_64_mmu_hv.c 		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;
kvm               236 arch/powerpc/kvm/book3s_64_mmu_hv.c 			& kvmppc_hpt_mask(&kvm->arch.hpt);
kvm               246 arch/powerpc/kvm/book3s_64_mmu_hv.c 		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
kvm               290 arch/powerpc/kvm/book3s_64_mmu_hv.c static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
kvm               298 arch/powerpc/kvm/book3s_64_mmu_hv.c 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
kvm               343 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm               352 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(vcpu->kvm))
kvm               363 arch/powerpc/kvm/book3s_64_mmu_hv.c 		slb_v = vcpu->kvm->arch.vrma_slb_v;
kvm               368 arch/powerpc/kvm/book3s_64_mmu_hv.c 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
kvm               374 arch/powerpc/kvm/book3s_64_mmu_hv.c 	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
kvm               378 arch/powerpc/kvm/book3s_64_mmu_hv.c 	gr = kvm->arch.hpt.rev[index].guest_rpte;
kvm               441 arch/powerpc/kvm/book3s_64_mmu_hv.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               444 arch/powerpc/kvm/book3s_64_mmu_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm               494 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm               512 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm))
kvm               525 arch/powerpc/kvm/book3s_64_mmu_hv.c 		mmio_update = atomic64_read(&kvm->arch.mmio_update);
kvm               538 arch/powerpc/kvm/book3s_64_mmu_hv.c 	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
kvm               539 arch/powerpc/kvm/book3s_64_mmu_hv.c 	rev = &kvm->arch.hpt.rev[index];
kvm               563 arch/powerpc/kvm/book3s_64_mmu_hv.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm               580 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               677 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (!kvm->arch.mmu_ready)
kvm               692 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
kvm               705 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_invalidate_hpte(kvm, hptep, index);
kvm               709 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
kvm               744 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_rmap_reset(struct kvm *kvm)
kvm               750 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               751 arch/powerpc/kvm/book3s_64_mmu_hv.c 	slots = kvm_memslots(kvm);
kvm               754 arch/powerpc/kvm/book3s_64_mmu_hv.c 		spin_lock(&kvm->mmu_lock);
kvm               761 arch/powerpc/kvm/book3s_64_mmu_hv.c 		spin_unlock(&kvm->mmu_lock);
kvm               763 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               766 arch/powerpc/kvm/book3s_64_mmu_hv.c typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               769 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_handle_hva_range(struct kvm *kvm,
kvm               779 arch/powerpc/kvm/book3s_64_mmu_hv.c 	slots = kvm_memslots(kvm);
kvm               797 arch/powerpc/kvm/book3s_64_mmu_hv.c 			ret = handler(kvm, memslot, gfn);
kvm               805 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
kvm               808 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
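
kvm_handle_hva_range() above clips the HVA interval against each memslot, converts the surviving addresses to guest frame numbers, and invokes a hva_handler_fn per page; kvm_handle_hva() is just the one-page range. A self-contained sketch of the same walk, assuming 4 KiB pages and an array in place of the kernel's memslot structures:

    #include <stdio.h>

    struct memslot { unsigned long hva_base, npages, base_gfn; };

    typedef int (*hva_handler_fn)(struct memslot *slot, unsigned long gfn);

    static int handle_hva_range(struct memslot *slots, int nslots,
    			    unsigned long start, unsigned long end,
    			    hva_handler_fn handler)
    {
    	int ret = 0;

    	for (int i = 0; i < nslots; i++) {
    		struct memslot *s = &slots[i];
    		unsigned long slot_end = s->hva_base + (s->npages << 12);
    		unsigned long lo = start > s->hva_base ? start : s->hva_base;
    		unsigned long hi = end < slot_end ? end : slot_end;

    		for (unsigned long hva = lo; hva < hi; hva += 4096)
    			ret |= handler(s, s->base_gfn +
    					  ((hva - s->hva_base) >> 12));
    	}
    	return ret;
    }

    /* The single-address variant is just a one-page range. */
    static int handle_hva(struct memslot *slots, int nslots, unsigned long hva,
    		      hva_handler_fn handler)
    {
    	return handle_hva_range(slots, nslots, hva, hva + 1, handler);
    }

    static int touch(struct memslot *s, unsigned long gfn)
    {
    	printf("slot@%#lx gfn %lu\n", s->hva_base, gfn);
    	return 1;
    }

    int main(void)
    {
    	struct memslot slot = { .hva_base = 0x10000, .npages = 4 };

    	return !handle_hva(&slot, 1, 0x11000, touch);
    }
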
kvm               812 arch/powerpc/kvm/book3s_64_mmu_hv.c static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
kvm               816 arch/powerpc/kvm/book3s_64_mmu_hv.c 	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
kvm               817 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct revmap_entry *rev = kvm->arch.hpt.rev;
kvm               840 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_invalidate_hpte(kvm, hptep, i);
kvm               849 arch/powerpc/kvm/book3s_64_mmu_hv.c 			note_hpte_modification(kvm, &rev[i]);
kvm               854 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               875 arch/powerpc/kvm/book3s_64_mmu_hv.c 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
kvm               884 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
kvm               891 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               895 arch/powerpc/kvm/book3s_64_mmu_hv.c 	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
kvm               896 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_handle_hva_range(kvm, start, end, handler);
kvm               900 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
kvm               909 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm)) {
kvm               910 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_radix_flush_memslot(kvm, memslot);
kvm               922 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvm_unmap_rmapp(kvm, memslot, gfn);
kvm               927 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               930 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct revmap_entry *rev = kvm->arch.hpt.rev;
kvm               950 arch/powerpc/kvm/book3s_64_mmu_hv.c 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
kvm               968 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvmppc_clear_ref_hpte(kvm, hptep, i);
kvm               971 arch/powerpc/kvm/book3s_64_mmu_hv.c 				note_hpte_modification(kvm, &rev[i]);
kvm               982 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               986 arch/powerpc/kvm/book3s_64_mmu_hv.c 	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
kvm               987 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return kvm_handle_hva_range(kvm, start, end, handler);
kvm               990 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               993 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct revmap_entry *rev = kvm->arch.hpt.rev;
kvm              1010 arch/powerpc/kvm/book3s_64_mmu_hv.c 			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
kvm              1023 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
kvm              1027 arch/powerpc/kvm/book3s_64_mmu_hv.c 	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
kvm              1028 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return kvm_handle_hva(kvm, hva, handler);
kvm              1031 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm              1035 arch/powerpc/kvm/book3s_64_mmu_hv.c 	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
kvm              1036 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_handle_hva(kvm, hva, handler);
kvm              1039 arch/powerpc/kvm/book3s_64_mmu_hv.c static int vcpus_running(struct kvm *kvm)
kvm              1041 arch/powerpc/kvm/book3s_64_mmu_hv.c 	return atomic_read(&kvm->arch.vcpus_running) != 0;
kvm              1048 arch/powerpc/kvm/book3s_64_mmu_hv.c static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
kvm              1050 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct revmap_entry *rev = kvm->arch.hpt.rev;
kvm              1067 arch/powerpc/kvm/book3s_64_mmu_hv.c 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
kvm              1086 arch/powerpc/kvm/book3s_64_mmu_hv.c 		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
kvm              1105 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_invalidate_hpte(kvm, hptep, i);
kvm              1112 arch/powerpc/kvm/book3s_64_mmu_hv.c 				note_hpte_modification(kvm, &rev[i]);
kvm              1147 arch/powerpc/kvm/book3s_64_mmu_hv.c long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
kvm              1156 arch/powerpc/kvm/book3s_64_mmu_hv.c 		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
kvm              1170 arch/powerpc/kvm/book3s_64_mmu_hv.c void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
kvm              1180 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              1181 arch/powerpc/kvm/book3s_64_mmu_hv.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm              1189 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1197 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1201 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
kvm              1216 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              1217 arch/powerpc/kvm/book3s_64_mmu_hv.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm              1220 arch/powerpc/kvm/book3s_64_mmu_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1243 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = resize->kvm;
kvm              1244 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm_hpt_info *old = &kvm->arch.hpt;
kvm              1291 arch/powerpc/kvm/book3s_64_mmu_hv.c 		int srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              1293 arch/powerpc/kvm/book3s_64_mmu_hv.c 			__gfn_to_memslot(kvm_memslots(kvm), gfn);
kvm              1300 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
kvm              1304 arch/powerpc/kvm/book3s_64_mmu_hv.c 		srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1397 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = resize->kvm;
kvm              1401 arch/powerpc/kvm/book3s_64_mmu_hv.c 	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
kvm              1412 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = resize->kvm;
kvm              1420 arch/powerpc/kvm/book3s_64_mmu_hv.c 	spin_lock(&kvm->mmu_lock);
kvm              1423 arch/powerpc/kvm/book3s_64_mmu_hv.c 	hpt_tmp = kvm->arch.hpt;
kvm              1424 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvmppc_set_hpt(kvm, &resize->hpt);
kvm              1427 arch/powerpc/kvm/book3s_64_mmu_hv.c 	spin_unlock(&kvm->mmu_lock);
kvm              1429 arch/powerpc/kvm/book3s_64_mmu_hv.c 	synchronize_srcu_expedited(&kvm->srcu);
kvm              1432 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvmppc_setup_partition_table(kvm);
kvm              1437 arch/powerpc/kvm/book3s_64_mmu_hv.c static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
kvm              1439 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
kvm              1451 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.resize_hpt == resize)
kvm              1452 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvm->arch.resize_hpt = NULL;
kvm              1460 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = resize->kvm;
kvm              1466 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              1469 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.resize_hpt == resize) {
kvm              1473 arch/powerpc/kvm/book3s_64_mmu_hv.c 		mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              1486 arch/powerpc/kvm/book3s_64_mmu_hv.c 		mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              1494 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.resize_hpt != resize)
kvm              1495 arch/powerpc/kvm/book3s_64_mmu_hv.c 		resize_hpt_release(kvm, resize);
kvm              1497 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              1500 arch/powerpc/kvm/book3s_64_mmu_hv.c long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
kvm              1508 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (flags != 0 || kvm_is_radix(kvm))
kvm              1514 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              1516 arch/powerpc/kvm/book3s_64_mmu_hv.c 	resize = kvm->arch.resize_hpt;
kvm              1525 arch/powerpc/kvm/book3s_64_mmu_hv.c 				resize_hpt_release(kvm, resize);
kvm              1531 arch/powerpc/kvm/book3s_64_mmu_hv.c 		resize_hpt_release(kvm, resize);
kvm              1548 arch/powerpc/kvm/book3s_64_mmu_hv.c 	resize->kvm = kvm;
kvm              1550 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.resize_hpt = resize;
kvm              1557 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              1566 arch/powerpc/kvm/book3s_64_mmu_hv.c long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
kvm              1574 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (flags != 0 || kvm_is_radix(kvm))
kvm              1580 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              1582 arch/powerpc/kvm/book3s_64_mmu_hv.c 	resize = kvm->arch.resize_hpt;
kvm              1586 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (WARN_ON(!kvm->arch.mmu_ready))
kvm              1590 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.mmu_ready = 0;
kvm              1613 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.mmu_ready = 1;
kvm              1616 arch/powerpc/kvm/book3s_64_mmu_hv.c 	resize_hpt_release(kvm, resize);
kvm              1617 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
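
The resize-HPT ioctls above form a two-phase protocol: PREPARE stashes a pending kvm_resize_hpt (a later PREPARE with a different size supersedes it) while a worker builds the new table, and COMMIT only pivots the guest once the pending request matches and is ready. A deliberately simplified user-space model of that state machine; the real prepare returns a busy/estimated-time hint while the worker runs, which is elided here:

    #include <errno.h>
    #include <stdlib.h>

    struct resize { int shift; int ready; };
    struct vm { struct resize *pending; };

    static int resize_prepare(struct vm *vm, int shift)
    {
    	struct resize *r = vm->pending;

    	/* A prepare with a different size supersedes the pending one. */
    	if (r && r->shift != shift) {
    		free(r);
    		vm->pending = r = NULL;
    	}
    	if (!r) {
    		r = calloc(1, sizeof(*r));
    		if (!r)
    			return -ENOMEM;
    		r->shift = shift;
    		vm->pending = r;
    	}
    	r->ready = 1;	/* pretend the allocation worker finished */
    	return 0;
    }

    static int resize_commit(struct vm *vm, int shift)
    {
    	struct resize *r = vm->pending;

    	/* Only pivot to a table that matches the prepared request. */
    	if (!r || r->shift != shift || !r->ready)
    		return -EIO;
    	/* ... swap the guest onto the new table ... */
    	free(r);
    	vm->pending = NULL;
    	return 0;
    }

    int main(void)
    {
    	struct vm vm = { 0 };

    	if (resize_prepare(&vm, 20))
    		return 1;
    	return resize_commit(&vm, 20) ? 1 : 0;
    }
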
kvm              1640 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm	*kvm;
kvm              1743 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = ctx->kvm;
kvm              1756 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm))
kvm              1763 arch/powerpc/kvm/book3s_64_mmu_hv.c 	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
kvm              1764 arch/powerpc/kvm/book3s_64_mmu_hv.c 	revp = kvm->arch.hpt.rev + i;
kvm              1779 arch/powerpc/kvm/book3s_64_mmu_hv.c 			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
kvm              1789 arch/powerpc/kvm/book3s_64_mmu_hv.c 		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
kvm              1805 arch/powerpc/kvm/book3s_64_mmu_hv.c 		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
kvm              1826 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
kvm              1842 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = ctx->kvm;
kvm              1856 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm))
kvm              1860 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              1861 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mmu_ready = kvm->arch.mmu_ready;
kvm              1863 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvm->arch.mmu_ready = 0;	/* temporarily */
kvm              1866 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (atomic_read(&kvm->arch.vcpus_running)) {
kvm              1867 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvm->arch.mmu_ready = 1;
kvm              1868 arch/powerpc/kvm/book3s_64_mmu_hv.c 			mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              1888 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
kvm              1889 arch/powerpc/kvm/book3s_64_mmu_hv.c 		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
kvm              1892 arch/powerpc/kvm/book3s_64_mmu_hv.c 		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
kvm              1914 arch/powerpc/kvm/book3s_64_mmu_hv.c 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
kvm              1916 arch/powerpc/kvm/book3s_64_mmu_hv.c 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
kvm              1927 arch/powerpc/kvm/book3s_64_mmu_hv.c 				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
kvm              1931 arch/powerpc/kvm/book3s_64_mmu_hv.c 					kvmppc_update_lpcr(kvm, lpcr,
kvm              1934 arch/powerpc/kvm/book3s_64_mmu_hv.c 					kvmppc_setup_partition_table(kvm);
kvm              1944 arch/powerpc/kvm/book3s_64_mmu_hv.c 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
kvm              1954 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.mmu_ready = mmu_ready;
kvm              1955 arch/powerpc/kvm/book3s_64_mmu_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              1968 arch/powerpc/kvm/book3s_64_mmu_hv.c 		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
kvm              1969 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_put_kvm(ctx->kvm);
kvm              1981 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
kvm              1993 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_get_kvm(kvm);
kvm              1994 arch/powerpc/kvm/book3s_64_mmu_hv.c 	ctx->kvm = kvm;
kvm              2003 arch/powerpc/kvm/book3s_64_mmu_hv.c 		kvm_put_kvm(kvm);
kvm              2008 arch/powerpc/kvm/book3s_64_mmu_hv.c 		mutex_lock(&kvm->slots_lock);
kvm              2009 arch/powerpc/kvm/book3s_64_mmu_hv.c 		atomic_inc(&kvm->arch.hpte_mod_interest);
kvm              2011 arch/powerpc/kvm/book3s_64_mmu_hv.c 		synchronize_srcu_expedited(&kvm->srcu);
kvm              2012 arch/powerpc/kvm/book3s_64_mmu_hv.c 		mutex_unlock(&kvm->slots_lock);
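
kvm_vm_ioctl_get_htab_fd() takes a reference on the VM (kvm_get_kvm) before handing out a file that points back at it, and drops that reference (kvm_put_kvm) if fd creation fails, so the struct kvm cannot be freed while the fd lives. The same pinning pattern in plain C with an atomic refcount; vm_get/vm_put and htab_ctx_create are illustrative names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct vm { atomic_int users; };

    static void vm_get(struct vm *vm)
    {
    	atomic_fetch_add(&vm->users, 1);
    }

    static void vm_put(struct vm *vm)
    {
    	if (atomic_fetch_sub(&vm->users, 1) == 1)
    		free(vm);		/* last reference gone */
    }

    struct htab_ctx { struct vm *vm; };

    /* Pin the VM before publishing a context that points at it; unpin on
     * failure, as the fd-creation error path does. */
    static struct htab_ctx *htab_ctx_create(struct vm *vm, int fail)
    {
    	struct htab_ctx *ctx = malloc(sizeof(*ctx));

    	if (!ctx)
    		return NULL;
    	vm_get(vm);
    	ctx->vm = vm;
    	if (fail) {			/* e.g. no free fd */
    		vm_put(vm);
    		free(ctx);
    		return NULL;
    	}
    	return ctx;
    }

    int main(void)
    {
    	struct vm *vm = calloc(1, sizeof(*vm));
    	struct htab_ctx *ctx;

    	if (!vm)
    		return 1;
    	vm_get(vm);			/* the creator's own reference */
    	ctx = htab_ctx_create(vm, 0);
    	if (ctx) {
    		vm_put(ctx->vm);	/* context released */
    		free(ctx);
    	}
    	vm_put(vm);			/* frees the vm */
    	return 0;
    }
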
kvm              2019 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm	*kvm;
kvm              2029 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm = inode->i_private;
kvm              2036 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_get_kvm(kvm);
kvm              2037 arch/powerpc/kvm/book3s_64_mmu_hv.c 	p->kvm = kvm;
kvm              2048 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm_put_kvm(p->kvm);
kvm              2060 arch/powerpc/kvm/book3s_64_mmu_hv.c 	struct kvm *kvm;
kvm              2063 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm = p->kvm;
kvm              2064 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm_is_radix(kvm))
kvm              2090 arch/powerpc/kvm/book3s_64_mmu_hv.c 	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
kvm              2091 arch/powerpc/kvm/book3s_64_mmu_hv.c 	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
kvm              2102 arch/powerpc/kvm/book3s_64_mmu_hv.c 		gr = kvm->arch.hpt.rev[i].guest_rpte;
kvm              2150 arch/powerpc/kvm/book3s_64_mmu_hv.c void kvmppc_mmu_debugfs_init(struct kvm *kvm)
kvm              2152 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
kvm              2153 arch/powerpc/kvm/book3s_64_mmu_hv.c 						    kvm->arch.debugfs_dir, kvm,
kvm                87 arch/powerpc/kvm/book3s_64_mmu_radix.c 	int lpid = vcpu->kvm->arch.lpid;
kvm               131 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
kvm               163 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
kvm               224 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
kvm               239 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
kvm               269 arch/powerpc/kvm/book3s_64_mmu_radix.c 				vcpu->kvm->arch.process_table, pid, &pte);
kvm               295 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
kvm               323 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
kvm               338 arch/powerpc/kvm/book3s_64_mmu_radix.c static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
kvm               345 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
kvm               348 arch/powerpc/kvm/book3s_64_mmu_radix.c 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
kvm               375 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
kvm               386 arch/powerpc/kvm/book3s_64_mmu_radix.c 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
kvm               387 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
kvm               390 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (lpid != kvm->arch.lpid)
kvm               394 arch/powerpc/kvm/book3s_64_mmu_radix.c 		memslot = gfn_to_memslot(kvm, gfn);
kvm               401 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvm->stat.num_2M_pages--;
kvm               403 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvm->stat.num_1G_pages--;
kvm               408 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
kvm               424 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
kvm               437 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_pte(kvm, p,
kvm               446 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
kvm               460 arch/powerpc/kvm/book3s_64_mmu_radix.c 				kvmppc_unmap_pte(kvm, (pte_t *)p,
kvm               468 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
kvm               475 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
kvm               490 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
kvm               494 arch/powerpc/kvm/book3s_64_mmu_radix.c 	pud_free(kvm->mm, pud);
kvm               497 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
kvm               507 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_free_pud(kvm, pud, lpid);
kvm               512 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_free_radix(struct kvm *kvm)
kvm               514 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (kvm->arch.pgtable) {
kvm               515 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
kvm               516 arch/powerpc/kvm/book3s_64_mmu_radix.c 					  kvm->arch.lpid);
kvm               517 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_free(kvm->mm, kvm->arch.pgtable);
kvm               518 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvm->arch.pgtable = NULL;
kvm               522 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
kvm               533 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_flush_pwc(kvm, lpid);
kvm               535 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
kvm               538 arch/powerpc/kvm/book3s_64_mmu_radix.c static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
kvm               549 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_flush_pwc(kvm, lpid);
kvm               551 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
kvm               563 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
kvm               580 arch/powerpc/kvm/book3s_64_mmu_radix.c 		new_pud = pud_alloc_one(kvm->mm, gpa);
kvm               592 arch/powerpc/kvm/book3s_64_mmu_radix.c 	spin_lock(&kvm->mmu_lock);
kvm               594 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (mmu_notifier_retry(kvm, mmu_seq))
kvm               602 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pgd_populate(kvm->mm, pgd, new_pud);
kvm               618 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
kvm               632 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
kvm               642 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
kvm               644 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
kvm               646 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
kvm               653 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_populate(kvm->mm, pud, new_pmd);
kvm               669 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
kvm               684 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
kvm               694 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
kvm               696 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
kvm               698 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
kvm               705 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pmd_populate(kvm->mm, pmd, new_ptep);
kvm               718 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
kvm               722 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
kvm               724 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
kvm               728 arch/powerpc/kvm/book3s_64_mmu_radix.c 	spin_unlock(&kvm->mmu_lock);
kvm               730 arch/powerpc/kvm/book3s_64_mmu_radix.c 		pud_free(kvm->mm, new_pud);
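
kvmppc_create_pte() above allocates any missing intermediate table (pud_alloc_one) before taking kvm->mmu_lock, bails out for a refault if mmu_notifier_retry() reports a racing invalidation, and frees the preallocated table at the end if it was never installed. A compact user-space rendering of that allocate-outside-the-lock shape; install_pte and level_table are invented for the sketch:

    #include <errno.h>
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_ulong mmu_seq;
    static void *level_table;	/* the possibly-missing intermediate table */

    static int install_pte(unsigned long seq)
    {
    	void *new_tbl = malloc(4096);	/* may sleep: done before locking */
    	int ret = 0;

    	pthread_mutex_lock(&mmu_lock);
    	if (atomic_load(&mmu_seq) != seq) {
    		ret = -EAGAIN;		/* invalidation raced: refault */
    		goto out_unlock;
    	}
    	if (!level_table && new_tbl) {
    		level_table = new_tbl;	/* consumed, don't free below */
    		new_tbl = NULL;
    	}
    	/* ... write the leaf PTE ... */
    out_unlock:
    	pthread_mutex_unlock(&mmu_lock);
    	free(new_tbl);			/* no-op if it was installed */
    	return ret;
    }

    int main(void)
    {
    	return install_pte(atomic_load(&mmu_seq));
    }
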
kvm               738 arch/powerpc/kvm/book3s_64_mmu_radix.c bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
kvm               760 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
kvm               772 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
kvm               784 arch/powerpc/kvm/book3s_64_mmu_radix.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               865 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
kvm               866 arch/powerpc/kvm/book3s_64_mmu_radix.c 				mmu_seq, kvm->arch.lpid, NULL, NULL);
kvm               881 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvm->stat.num_2M_pages++;
kvm               883 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvm->stat.num_1G_pages++;
kvm               892 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = vcpu->kvm;
kvm               919 arch/powerpc/kvm/book3s_64_mmu_radix.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm               947 arch/powerpc/kvm/book3s_64_mmu_radix.c 		spin_lock(&kvm->mmu_lock);
kvm               948 arch/powerpc/kvm/book3s_64_mmu_radix.c 		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
kvm               949 arch/powerpc/kvm/book3s_64_mmu_radix.c 					    writing, gpa, kvm->arch.lpid))
kvm               951 arch/powerpc/kvm/book3s_64_mmu_radix.c 		spin_unlock(&kvm->mmu_lock);
kvm               968 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               975 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm               977 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm               978 arch/powerpc/kvm/book3s_64_mmu_radix.c 				 kvm->arch.lpid);
kvm               983 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm               992 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm               994 arch/powerpc/kvm/book3s_64_mmu_radix.c 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
kvm               999 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
kvm              1008 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              1016 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm              1023 arch/powerpc/kvm/book3s_64_mmu_radix.c static int kvm_radix_test_clear_dirty(struct kvm *kvm,
kvm              1033 arch/powerpc/kvm/book3s_64_mmu_radix.c 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm              1038 arch/powerpc/kvm/book3s_64_mmu_radix.c 		spin_lock(&kvm->mmu_lock);
kvm              1039 arch/powerpc/kvm/book3s_64_mmu_radix.c 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
kvm              1041 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
kvm              1044 arch/powerpc/kvm/book3s_64_mmu_radix.c 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
kvm              1047 arch/powerpc/kvm/book3s_64_mmu_radix.c 		spin_unlock(&kvm->mmu_lock);
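
kvm_radix_test_clear_dirty() harvests the dirty log by atomically clearing _PAGE_DIRTY in the radix PTE under mmu_lock and flushing the TLB entry, reporting how many pages were dirty. The core test-and-clear step, modelled with C11 atomics over a flat flags array (the TLB flush and huge-page accounting are omitted):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PAGE_DIRTY	0x1UL
    #define NPAGES		1024

    static atomic_ulong pte_flags[NPAGES];	/* one flags word per page */

    /* Clear the dirty bit and report whether it was set. */
    static int test_clear_dirty(unsigned long gfn)
    {
    	unsigned long old = atomic_fetch_and(&pte_flags[gfn], ~PAGE_DIRTY);

    	return (old & PAGE_DIRTY) != 0;
    }

    /* Harvest one slot into a dirty bitmap, as the get_dirty_log path does. */
    static void harvest(unsigned long npages, unsigned char *map)
    {
    	for (unsigned long i = 0; i < npages; i++)
    		if (test_clear_dirty(i))
    			map[i / 8] |= 1u << (i % 8);
    }

    int main(void)
    {
    	unsigned char map[NPAGES / 8] = { 0 };

    	atomic_fetch_or(&pte_flags[5], PAGE_DIRTY);
    	harvest(NPAGES, map);
    	printf("page 5 dirty: %d\n", (map[0] >> 5) & 1);
    	return 0;
    }
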
kvm              1052 arch/powerpc/kvm/book3s_64_mmu_radix.c long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
kvm              1059 arch/powerpc/kvm/book3s_64_mmu_radix.c 		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
kvm              1077 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmppc_radix_flush_memslot(struct kvm *kvm,
kvm              1086 arch/powerpc/kvm/book3s_64_mmu_radix.c 	spin_lock(&kvm->mmu_lock);
kvm              1088 arch/powerpc/kvm/book3s_64_mmu_radix.c 		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm              1090 arch/powerpc/kvm/book3s_64_mmu_radix.c 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm              1091 arch/powerpc/kvm/book3s_64_mmu_radix.c 					 kvm->arch.lpid);
kvm              1094 arch/powerpc/kvm/book3s_64_mmu_radix.c 	spin_unlock(&kvm->mmu_lock);
kvm              1107 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
kvm              1134 arch/powerpc/kvm/book3s_64_mmu_radix.c int kvmppc_init_vm_radix(struct kvm *kvm)
kvm              1136 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
kvm              1137 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (!kvm->arch.pgtable)
kvm              1153 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm	*kvm;
kvm              1165 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm = inode->i_private;
kvm              1172 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm_get_kvm(kvm);
kvm              1173 arch/powerpc/kvm/book3s_64_mmu_radix.c 	p->kvm = kvm;
kvm              1184 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm_put_kvm(p->kvm);
kvm              1195 arch/powerpc/kvm/book3s_64_mmu_radix.c 	struct kvm *kvm;
kvm              1206 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm = p->kvm;
kvm              1207 arch/powerpc/kvm/book3s_64_mmu_radix.c 	if (!kvm_is_radix(kvm))
kvm              1243 arch/powerpc/kvm/book3s_64_mmu_radix.c 			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
kvm              1250 arch/powerpc/kvm/book3s_64_mmu_radix.c 				pgt = kvm->arch.pgtable;
kvm              1252 arch/powerpc/kvm/book3s_64_mmu_radix.c 				nested = kvmhv_get_nested(kvm, p->lpid, false);
kvm              1354 arch/powerpc/kvm/book3s_64_mmu_radix.c void kvmhv_radix_debugfs_init(struct kvm *kvm)
kvm              1356 arch/powerpc/kvm/book3s_64_mmu_radix.c 	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
kvm              1357 arch/powerpc/kvm/book3s_64_mmu_radix.c 						     kvm->arch.debugfs_dir, kvm,
kvm                69 arch/powerpc/kvm/book3s_64_vio.c extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
kvm                77 arch/powerpc/kvm/book3s_64_vio.c 	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
kvm                94 arch/powerpc/kvm/book3s_64_vio.c extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
kvm               109 arch/powerpc/kvm/book3s_64_vio.c 	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
kvm               242 arch/powerpc/kvm/book3s_64_vio.c 	struct kvm *kvm = stt->kvm;
kvm               244 arch/powerpc/kvm/book3s_64_vio.c 	mutex_lock(&kvm->lock);
kvm               246 arch/powerpc/kvm/book3s_64_vio.c 	mutex_unlock(&kvm->lock);
kvm               256 arch/powerpc/kvm/book3s_64_vio.c 	kvm_put_kvm(stt->kvm);
kvm               270 arch/powerpc/kvm/book3s_64_vio.c long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
kvm               297 arch/powerpc/kvm/book3s_64_vio.c 	stt->kvm = kvm;
kvm               301 arch/powerpc/kvm/book3s_64_vio.c 	mutex_lock(&kvm->lock);
kvm               305 arch/powerpc/kvm/book3s_64_vio.c 	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
kvm               312 arch/powerpc/kvm/book3s_64_vio.c 	kvm_get_kvm(kvm);
kvm               318 arch/powerpc/kvm/book3s_64_vio.c 		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
kvm               320 arch/powerpc/kvm/book3s_64_vio.c 		kvm_put_kvm(kvm);
kvm               322 arch/powerpc/kvm/book3s_64_vio.c 	mutex_unlock(&kvm->lock);
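
kvm_vm_ioctl_create_spapr_tce() walks the existing spapr_tce_tables under kvm->lock before publishing a new window with list_add_rcu, and drops the VM reference it took on any failure. A simplified model of the duplicate check and publish step, with the RCU list flattened to an array; the reject-duplicate-LIOBN behaviour is taken from the kernel source rather than being visible in the excerpt above:

    #include <errno.h>

    struct tce_table { unsigned long liobn; };

    #define MAX_TABLES 16
    static struct tce_table tables[MAX_TABLES];
    static int ntables;

    static int create_tce_table(unsigned long liobn)
    {
    	/* Reject a second window with the same LIOBN. */
    	for (int i = 0; i < ntables; i++)
    		if (tables[i].liobn == liobn)
    			return -EBUSY;
    	if (ntables == MAX_TABLES)
    		return -ENOSPC;
    	tables[ntables++].liobn = liobn;	/* publish */
    	return 0;
    }

    int main(void)
    {
    	if (create_tce_table(0x80000001))
    		return 1;
    	return create_tce_table(0x80000001) == -EBUSY ? 0 : 1;
    }
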
kvm               333 arch/powerpc/kvm/book3s_64_vio.c static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
kvm               339 arch/powerpc/kvm/book3s_64_vio.c 	memslot = search_memslots(kvm_memslots(kvm), gfn);
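
kvmppc_tce_to_ua() converts the guest-physical page in a TCE to a host userspace address: shift out the page offset to get a gfn, find the covering memslot, then add the in-slot offset to the slot's userspace base. The arithmetic, spelled out under the assumption of 4 KiB pages (the kernel additionally strips the TCE read/write permission bits, which this sketch keeps for brevity):

    #include <errno.h>

    #define PAGE_SHIFT	12
    #define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

    struct memslot { unsigned long base_gfn, npages, userspace_addr; };

    static int tce_to_ua(const struct memslot *slots, int nslots,
    		     unsigned long tce, unsigned long *ua)
    {
    	unsigned long gfn = tce >> PAGE_SHIFT;

    	for (int i = 0; i < nslots; i++) {
    		const struct memslot *s = &slots[i];

    		if (gfn < s->base_gfn || gfn >= s->base_gfn + s->npages)
    			continue;
    		*ua = s->userspace_addr +
    		      ((gfn - s->base_gfn) << PAGE_SHIFT) +
    		      (tce & ~PAGE_MASK);	/* keep the page offset */
    		return 0;
    	}
    	return -EINVAL;			/* gfn not backed by any slot */
    }

    int main(void)
    {
    	struct memslot slot = { .base_gfn = 0x100, .npages = 16,
    				.userspace_addr = 0x7f0000000000UL };
    	unsigned long ua;

    	return tce_to_ua(&slot, 1, (0x101UL << PAGE_SHIFT) | 0x3, &ua);
    }
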
kvm               364 arch/powerpc/kvm/book3s_64_vio.c 	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
kvm               372 arch/powerpc/kvm/book3s_64_vio.c 		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
kvm               422 arch/powerpc/kvm/book3s_64_vio.c static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
kvm               432 arch/powerpc/kvm/book3s_64_vio.c 	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
kvm               443 arch/powerpc/kvm/book3s_64_vio.c static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
kvm               450 arch/powerpc/kvm/book3s_64_vio.c 	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
kvm               457 arch/powerpc/kvm/book3s_64_vio.c 	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
kvm               459 arch/powerpc/kvm/book3s_64_vio.c 		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
kvm               464 arch/powerpc/kvm/book3s_64_vio.c static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
kvm               473 arch/powerpc/kvm/book3s_64_vio.c 		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
kvm               481 arch/powerpc/kvm/book3s_64_vio.c long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
kvm               494 arch/powerpc/kvm/book3s_64_vio.c 	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
kvm               505 arch/powerpc/kvm/book3s_64_vio.c 	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
kvm               512 arch/powerpc/kvm/book3s_64_vio.c 		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
kvm               519 arch/powerpc/kvm/book3s_64_vio.c static long kvmppc_tce_iommu_map(struct kvm *kvm,
kvm               531 arch/powerpc/kvm/book3s_64_vio.c 		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
kvm               552 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               560 arch/powerpc/kvm/book3s_64_vio.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               568 arch/powerpc/kvm/book3s_64_vio.c 	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
kvm               577 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
kvm               580 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
kvm               586 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
kvm               594 arch/powerpc/kvm/book3s_64_vio.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm               611 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               630 arch/powerpc/kvm/book3s_64_vio.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               631 arch/powerpc/kvm/book3s_64_vio.c 	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
kvm               666 arch/powerpc/kvm/book3s_64_vio.c 		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
kvm               672 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
kvm               677 arch/powerpc/kvm/book3s_64_vio.c 				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
kvm               691 arch/powerpc/kvm/book3s_64_vio.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm               705 arch/powerpc/kvm/book3s_64_vio.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               721 arch/powerpc/kvm/book3s_64_vio.c 			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
kvm               731 arch/powerpc/kvm/book3s_64_vio.c 			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
kvm                64 arch/powerpc/kvm/book3s_64_vio_hv.c struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
kvm                69 arch/powerpc/kvm/book3s_64_vio_hv.c 	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
kvm                78 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
kvm                84 arch/powerpc/kvm/book3s_64_vio_hv.c 	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
kvm               120 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
kvm               128 arch/powerpc/kvm/book3s_64_vio_hv.c 		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
kvm               251 arch/powerpc/kvm/book3s_64_vio_hv.c static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
kvm               257 arch/powerpc/kvm/book3s_64_vio_hv.c 	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
kvm               260 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
kvm               271 arch/powerpc/kvm/book3s_64_vio_hv.c 	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
kvm               282 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
kvm               289 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
kvm               299 arch/powerpc/kvm/book3s_64_vio_hv.c 	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
kvm               301 arch/powerpc/kvm/book3s_64_vio_hv.c 		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
kvm               306 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
kvm               315 arch/powerpc/kvm/book3s_64_vio_hv.c 		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
kvm               323 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
kvm               336 arch/powerpc/kvm/book3s_64_vio_hv.c 	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
kvm               347 arch/powerpc/kvm/book3s_64_vio_hv.c 	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
kvm               358 arch/powerpc/kvm/book3s_64_vio_hv.c 		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
kvm               365 arch/powerpc/kvm/book3s_64_vio_hv.c static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
kvm               377 arch/powerpc/kvm/book3s_64_vio_hv.c 		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
kvm               399 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
kvm               402 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               415 arch/powerpc/kvm/book3s_64_vio_hv.c 	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
kvm               422 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
kvm               425 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
kvm               431 arch/powerpc/kvm/book3s_64_vio_hv.c 			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
kvm               489 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
kvm               492 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               511 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
kvm               519 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
kvm               522 arch/powerpc/kvm/book3s_64_vio_hv.c 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
kvm               535 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
kvm               569 arch/powerpc/kvm/book3s_64_vio_hv.c 		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
kvm               575 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
kvm               580 arch/powerpc/kvm/book3s_64_vio_hv.c 				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
kvm               609 arch/powerpc/kvm/book3s_64_vio_hv.c 	if (kvm_is_radix(vcpu->kvm))
kvm               612 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               628 arch/powerpc/kvm/book3s_64_vio_hv.c 			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
kvm               638 arch/powerpc/kvm/book3s_64_vio_hv.c 			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
kvm               662 arch/powerpc/kvm/book3s_64_vio_hv.c 	stt = kvmppc_find_table(vcpu->kvm, liobn);
kvm               128 arch/powerpc/kvm/book3s_hv.c static inline bool nesting_enabled(struct kvm *kvm)
kvm               130 arch/powerpc/kvm/book3s_hv.c 	return kvm->arch.nested_enable && kvm_is_radix(kvm);
kvm               443 arch/powerpc/kvm/book3s_hv.c 	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
kvm               447 arch/powerpc/kvm/book3s_hv.c static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
kvm               449 arch/powerpc/kvm/book3s_hv.c 	return kvm_get_vcpu_by_id(kvm, id);
kvm               494 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm               502 arch/powerpc/kvm/book3s_hv.c 	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
kvm               514 arch/powerpc/kvm/book3s_hv.c 		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
kvm               521 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, va, vpa, false);
kvm               609 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm               628 arch/powerpc/kvm/book3s_hv.c 			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
kvm               634 arch/powerpc/kvm/book3s_hv.c 			kvmppc_unpin_guest_page(kvm, va, gpa, false);
kvm               644 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, va, gpa, false);
kvm               648 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
kvm               801 arch/powerpc/kvm/book3s_hv.c static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
kvm               810 arch/powerpc/kvm/book3s_hv.c 	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
kvm               822 arch/powerpc/kvm/book3s_hv.c 	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
kvm               838 arch/powerpc/kvm/book3s_hv.c 	mark_page_dirty(kvm, to >> PAGE_SHIFT);
kvm               860 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
kvm               864 arch/powerpc/kvm/book3s_hv.c 		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
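
The H_PAGE_INIT path above initialises a destination page either by copying a source page (kvmppc_copy_guest, which also marks the destination dirty) or by zeroing it (kvm_clear_guest), one page-sized chunk at a time. A user-space reduction of that copy-or-clear split, with guest memory modelled as a flat buffer and dirty tracking omitted:

    #include <errno.h>
    #include <string.h>

    #define PG_SZ 4096

    /* Initialise one destination page by copy or by zeroing. */
    static int page_init(unsigned char *guest, unsigned long dest,
    		     unsigned long src, int copy)
    {
    	if ((dest | src) & (PG_SZ - 1))
    		return -EINVAL;		/* both must be page aligned */
    	if (copy)
    		memcpy(guest + dest, guest + src, PG_SZ);
    	else
    		memset(guest + dest, 0, PG_SZ);
    	return 0;
    }

    int main(void)
    {
    	static unsigned char guest[4 * PG_SZ];

    	guest[0] = 0xaa;
    	if (page_init(guest, PG_SZ, 0, 1))	/* copy page 0 -> page 1 */
    		return 1;
    	return guest[PG_SZ] == 0xaa ? 0 : 1;
    }
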
kvm               918 arch/powerpc/kvm/book3s_hv.c 	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
kvm               926 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
kvm               940 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
kvm               956 arch/powerpc/kvm/book3s_hv.c 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
kvm               959 arch/powerpc/kvm/book3s_hv.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               961 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1048 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
kvm              1053 arch/powerpc/kvm/book3s_hv.c 		if (!nesting_enabled(vcpu->kvm))
kvm              1068 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
kvm              1073 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(vcpu->kvm))
kvm              1167 arch/powerpc/kvm/book3s_hv.c 	nthreads = vcpu->kvm->arch.emul_smt_mode;
kvm              1171 arch/powerpc/kvm/book3s_hv.c 		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
kvm              1198 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              1206 arch/powerpc/kvm/book3s_hv.c 	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
kvm              1213 arch/powerpc/kvm/book3s_hv.c 		if (arg >= kvm->arch.emul_smt_mode)
kvm              1215 arch/powerpc/kvm/book3s_hv.c 		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
kvm              1305 arch/powerpc/kvm/book3s_hv.c 		if (!vcpu->kvm->arch.fwnmi_enabled) {
kvm              1496 arch/powerpc/kvm/book3s_hv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1498 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              1506 arch/powerpc/kvm/book3s_hv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1508 arch/powerpc/kvm/book3s_hv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              1577 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              1590 arch/powerpc/kvm/book3s_hv.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1833 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
kvm              2067 arch/powerpc/kvm/book3s_hv.c 		vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
kvm              2084 arch/powerpc/kvm/book3s_hv.c static int threads_per_vcore(struct kvm *kvm)
kvm              2086 arch/powerpc/kvm/book3s_hv.c 	if (kvm->arch.threads_indep)
kvm              2091 arch/powerpc/kvm/book3s_hv.c static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
kvm              2104 arch/powerpc/kvm/book3s_hv.c 	vcore->lpcr = kvm->arch.lpcr;
kvm              2106 arch/powerpc/kvm/book3s_hv.c 	vcore->kvm = kvm;
kvm              2141 arch/powerpc/kvm/book3s_hv.c 	kvm_get_kvm(vcpu->kvm);
kvm              2152 arch/powerpc/kvm/book3s_hv.c 	kvm_put_kvm(p->vcpu->kvm);
kvm              2241 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              2244 arch/powerpc/kvm/book3s_hv.c 	if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
kvm              2246 arch/powerpc/kvm/book3s_hv.c 	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
kvm              2260 arch/powerpc/kvm/book3s_hv.c static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
kvm              2273 arch/powerpc/kvm/book3s_hv.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm              2321 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->lock);
kvm              2325 arch/powerpc/kvm/book3s_hv.c 		if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
kvm              2329 arch/powerpc/kvm/book3s_hv.c 			BUG_ON(kvm->arch.smt_mode != 1);
kvm              2330 arch/powerpc/kvm/book3s_hv.c 			core = kvmppc_pack_vcpu_id(kvm, id);
kvm              2333 arch/powerpc/kvm/book3s_hv.c 		core = id / kvm->arch.smt_mode;
kvm              2336 arch/powerpc/kvm/book3s_hv.c 		vcore = kvm->arch.vcores[core];
kvm              2346 arch/powerpc/kvm/book3s_hv.c 			vcore = kvmppc_vcore_create(kvm,
kvm              2347 arch/powerpc/kvm/book3s_hv.c 					id & ~(kvm->arch.smt_mode - 1));
kvm              2348 arch/powerpc/kvm/book3s_hv.c 			mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              2349 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.vcores[core] = vcore;
kvm              2350 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.online_vcores++;
kvm              2351 arch/powerpc/kvm/book3s_hv.c 			mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              2354 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->lock);
kvm              2382 arch/powerpc/kvm/book3s_hv.c static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
kvm              2407 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->lock);
kvm              2409 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.online_vcores) {
kvm              2410 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.smt_mode = smt_mode;
kvm              2411 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.emul_smt_mode = esmt;
kvm              2414 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->lock);
kvm              2419 arch/powerpc/kvm/book3s_hv.c static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
kvm              2422 arch/powerpc/kvm/book3s_hv.c 		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
kvm              2429 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
kvm              2430 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
kvm              2431 arch/powerpc/kvm/book3s_hv.c 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
kvm              2533 arch/powerpc/kvm/book3s_hv.c static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
kvm              2544 arch/powerpc/kvm/book3s_hv.c 		cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
kvm              2545 arch/powerpc/kvm/book3s_hv.c 		cpu_in_guest = &kvm->arch.cpu_in_guest;
kvm              2561 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              2588 arch/powerpc/kvm/book3s_hv.c 			radix_flush_cpu(kvm, prev_cpu, vcpu);
kvm              2600 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vc->kvm;
kvm              2611 arch/powerpc/kvm/book3s_hv.c 		cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
kvm              2714 arch/powerpc/kvm/book3s_hv.c 	if (vc->num_threads < threads_per_vcore(vc->kvm)) {
kvm              2808 arch/powerpc/kvm/book3s_hv.c 	if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
kvm              2813 arch/powerpc/kvm/book3s_hv.c 	    kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
kvm              2875 arch/powerpc/kvm/book3s_hv.c 		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
kvm              2904 arch/powerpc/kvm/book3s_hv.c 		if (!vc->kvm->arch.mmu_ready)
kvm              3082 arch/powerpc/kvm/book3s_hv.c 	controlled_threads = threads_per_vcore(vc->kvm);
kvm              3093 arch/powerpc/kvm/book3s_hv.c 		!kvm_is_radix(vc->kvm);
kvm              3096 arch/powerpc/kvm/book3s_hv.c 	    (hpt_on_radix && vc->kvm->arch.threads_indep)) {
kvm              3123 arch/powerpc/kvm/book3s_hv.c 	if (kvm_is_radix(vc->kvm)) {
kvm              3189 arch/powerpc/kvm/book3s_hv.c 				split_info.lpidr_req = vc->kvm->arch.lpid;
kvm              3190 arch/powerpc/kvm/book3s_hv.c 				split_info.host_lpcr = vc->kvm->arch.host_lpcr;
kvm              3296 arch/powerpc/kvm/book3s_hv.c 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
kvm              3312 arch/powerpc/kvm/book3s_hv.c 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
kvm              3364 arch/powerpc/kvm/book3s_hv.c 		cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
kvm              3491 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
kvm              3510 arch/powerpc/kvm/book3s_hv.c 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
kvm              3609 arch/powerpc/kvm/book3s_hv.c 			hvregs.lpid = vcpu->kvm->arch.lpid;
kvm              3687 arch/powerpc/kvm/book3s_hv.c 	save_pmu |= nesting_enabled(vcpu->kvm);
kvm              3890 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              3892 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              3893 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.mmu_ready) {
kvm              3894 arch/powerpc/kvm/book3s_hv.c 		if (!kvm_is_radix(kvm))
kvm              3898 arch/powerpc/kvm/book3s_hv.c 				kvmppc_setup_partition_table(kvm);
kvm              3899 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.mmu_ready = 1;
kvm              3902 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              3954 arch/powerpc/kvm/book3s_hv.c 		if (!vcpu->kvm->arch.mmu_ready) {
kvm              4043 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              4065 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.mmu_ready)
kvm              4085 arch/powerpc/kvm/book3s_hv.c 	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
kvm              4118 arch/powerpc/kvm/book3s_hv.c 		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
kvm              4121 arch/powerpc/kvm/book3s_hv.c 		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
kvm              4126 arch/powerpc/kvm/book3s_hv.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              4140 arch/powerpc/kvm/book3s_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              4143 arch/powerpc/kvm/book3s_hv.c 		mtspr(SPRN_LPID, kvm->arch.host_lpid);
kvm              4154 arch/powerpc/kvm/book3s_hv.c 	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
kvm              4222 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm;
kvm              4269 arch/powerpc/kvm/book3s_hv.c 	kvm = vcpu->kvm;
kvm              4270 arch/powerpc/kvm/book3s_hv.c 	atomic_inc(&kvm->arch.vcpus_running);
kvm              4298 arch/powerpc/kvm/book3s_hv.c 		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
kvm              4312 arch/powerpc/kvm/book3s_hv.c 			srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              4315 arch/powerpc/kvm/book3s_hv.c 			srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              4335 arch/powerpc/kvm/book3s_hv.c 	atomic_dec(&kvm->arch.vcpus_running);
kvm              4359 arch/powerpc/kvm/book3s_hv.c static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
kvm              4392 arch/powerpc/kvm/book3s_hv.c static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
kvm              4402 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->slots_lock);
kvm              4408 arch/powerpc/kvm/book3s_hv.c 	slots = kvm_memslots(kvm);
kvm              4422 arch/powerpc/kvm/book3s_hv.c 	if (kvm_is_radix(kvm))
kvm              4423 arch/powerpc/kvm/book3s_hv.c 		r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
kvm              4425 arch/powerpc/kvm/book3s_hv.c 		r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
kvm              4441 arch/powerpc/kvm/book3s_hv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              4454 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->slots_lock);
kvm              4477 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
kvm              4484 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
kvm              4499 arch/powerpc/kvm/book3s_hv.c 		atomic64_inc(&kvm->arch.mmio_update);
kvm              4514 arch/powerpc/kvm/book3s_hv.c 	if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
kvm              4516 arch/powerpc/kvm/book3s_hv.c 		kvmppc_radix_flush_memslot(kvm, old);
kvm              4524 arch/powerpc/kvm/book3s_hv.c void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
kvm              4529 arch/powerpc/kvm/book3s_hv.c 	if ((kvm->arch.lpcr & mask) == lpcr)
kvm              4532 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
kvm              4535 arch/powerpc/kvm/book3s_hv.c 		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
kvm              4541 arch/powerpc/kvm/book3s_hv.c 		if (++cores_done >= kvm->arch.online_vcores)
kvm              4551 arch/powerpc/kvm/book3s_hv.c void kvmppc_setup_partition_table(struct kvm *kvm)
kvm              4555 arch/powerpc/kvm/book3s_hv.c 	if (!kvm_is_radix(kvm)) {
kvm              4557 arch/powerpc/kvm/book3s_hv.c 		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
kvm              4558 arch/powerpc/kvm/book3s_hv.c 			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
kvm              4560 arch/powerpc/kvm/book3s_hv.c 		dw0 |= kvm->arch.sdr1;
kvm              4563 arch/powerpc/kvm/book3s_hv.c 		dw1 = kvm->arch.process_table;
kvm              4566 arch/powerpc/kvm/book3s_hv.c 			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
kvm              4567 arch/powerpc/kvm/book3s_hv.c 		dw1 = PATB_GR | kvm->arch.process_table;
kvm              4569 arch/powerpc/kvm/book3s_hv.c 	kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
kvm              4579 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm = vcpu->kvm;
kvm              4588 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.hpt.virt) {
kvm              4604 arch/powerpc/kvm/book3s_hv.c 		kvmppc_set_hpt(kvm, &info);
kvm              4608 arch/powerpc/kvm/book3s_hv.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              4609 arch/powerpc/kvm/book3s_hv.c 	memslot = gfn_to_memslot(kvm, 0);
kvm              4637 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
kvm              4646 arch/powerpc/kvm/book3s_hv.c 		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
kvm              4653 arch/powerpc/kvm/book3s_hv.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              4666 arch/powerpc/kvm/book3s_hv.c int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
kvm              4668 arch/powerpc/kvm/book3s_hv.c 	if (nesting_enabled(kvm))
kvm              4669 arch/powerpc/kvm/book3s_hv.c 		kvmhv_release_all_nested(kvm);
kvm              4670 arch/powerpc/kvm/book3s_hv.c 	kvmppc_rmap_reset(kvm);
kvm              4671 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.process_table = 0;
kvm              4673 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&kvm->mmu_lock);
kvm              4674 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.radix = 0;
kvm              4675 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&kvm->mmu_lock);
kvm              4676 arch/powerpc/kvm/book3s_hv.c 	kvmppc_free_radix(kvm);
kvm              4677 arch/powerpc/kvm/book3s_hv.c 	kvmppc_update_lpcr(kvm, LPCR_VPM1,
kvm              4686 arch/powerpc/kvm/book3s_hv.c int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
kvm              4690 arch/powerpc/kvm/book3s_hv.c 	err = kvmppc_init_vm_radix(kvm);
kvm              4693 arch/powerpc/kvm/book3s_hv.c 	kvmppc_rmap_reset(kvm);
kvm              4695 arch/powerpc/kvm/book3s_hv.c 	spin_lock(&kvm->mmu_lock);
kvm              4696 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.radix = 1;
kvm              4697 arch/powerpc/kvm/book3s_hv.c 	spin_unlock(&kvm->mmu_lock);
kvm              4698 arch/powerpc/kvm/book3s_hv.c 	kvmppc_free_hpt(&kvm->arch.hpt);
kvm              4699 arch/powerpc/kvm/book3s_hv.c 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
kvm              4784 arch/powerpc/kvm/book3s_hv.c static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm              4790 arch/powerpc/kvm/book3s_hv.c 	mutex_init(&kvm->arch.mmu_setup_lock);
kvm              4797 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.lpid = lpid;
kvm              4801 arch/powerpc/kvm/book3s_hv.c 	kvmhv_vm_nested_init(kvm);
kvm              4811 arch/powerpc/kvm/book3s_hv.c 		cpumask_setall(&kvm->arch.need_tlb_flush);
kvm              4814 arch/powerpc/kvm/book3s_hv.c 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
kvm              4815 arch/powerpc/kvm/book3s_hv.c 	       sizeof(kvm->arch.enabled_hcalls));
kvm              4818 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
kvm              4822 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.host_lpid = mfspr(SPRN_LPID);
kvm              4823 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
kvm              4830 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
kvm              4858 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.radix = 1;
kvm              4859 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.mmu_ready = 1;
kvm              4862 arch/powerpc/kvm/book3s_hv.c 		ret = kvmppc_init_vm_radix(kvm);
kvm              4864 arch/powerpc/kvm/book3s_hv.c 			kvmppc_free_lpid(kvm->arch.lpid);
kvm              4867 arch/powerpc/kvm/book3s_hv.c 		kvmppc_setup_partition_table(kvm);
kvm              4870 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.lpcr = lpcr;
kvm              4873 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.resize_hpt = NULL;
kvm              4880 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;	/* 128 */
kvm              4882 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
kvm              4884 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
kvm              4886 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */
kvm              4897 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.threads_indep = true;
kvm              4899 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.threads_indep = indep_threads_mode;
kvm              4902 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.threads_indep)
kvm              4913 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.smt_mode = threads_per_subcore;
kvm              4915 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.smt_mode = 1;
kvm              4916 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.emul_smt_mode = 1;
kvm              4922 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
kvm              4923 arch/powerpc/kvm/book3s_hv.c 	kvmppc_mmu_debugfs_init(kvm);
kvm              4925 arch/powerpc/kvm/book3s_hv.c 		kvmhv_radix_debugfs_init(kvm);
kvm              4930 arch/powerpc/kvm/book3s_hv.c static void kvmppc_free_vcores(struct kvm *kvm)
kvm              4935 arch/powerpc/kvm/book3s_hv.c 		kfree(kvm->arch.vcores[i]);
kvm              4936 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.online_vcores = 0;
kvm              4939 arch/powerpc/kvm/book3s_hv.c static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm              4941 arch/powerpc/kvm/book3s_hv.c 	debugfs_remove_recursive(kvm->arch.debugfs_dir);
kvm              4943 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.threads_indep)
kvm              4946 arch/powerpc/kvm/book3s_hv.c 	kvmppc_free_vcores(kvm);
kvm              4949 arch/powerpc/kvm/book3s_hv.c 	if (kvm_is_radix(kvm))
kvm              4950 arch/powerpc/kvm/book3s_hv.c 		kvmppc_free_radix(kvm);
kvm              4952 arch/powerpc/kvm/book3s_hv.c 		kvmppc_free_hpt(&kvm->arch.hpt);
kvm              4956 arch/powerpc/kvm/book3s_hv.c 		if (nesting_enabled(kvm))
kvm              4957 arch/powerpc/kvm/book3s_hv.c 			kvmhv_release_all_nested(kvm);
kvm              4958 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.process_table = 0;
kvm              4959 arch/powerpc/kvm/book3s_hv.c 		kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
kvm              4961 arch/powerpc/kvm/book3s_hv.c 	kvmppc_free_lpid(kvm->arch.lpid);
kvm              4963 arch/powerpc/kvm/book3s_hv.c 	kvmppc_free_pimap(kvm);
kvm              5000 arch/powerpc/kvm/book3s_hv.c void kvmppc_free_pimap(struct kvm *kvm)
kvm              5002 arch/powerpc/kvm/book3s_hv.c 	kfree(kvm->arch.pimap);
kvm              5010 arch/powerpc/kvm/book3s_hv.c static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
kvm              5025 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->lock);
kvm              5027 arch/powerpc/kvm/book3s_hv.c 	pimap = kvm->arch.pimap;
kvm              5032 arch/powerpc/kvm/book3s_hv.c 			mutex_unlock(&kvm->lock);
kvm              5035 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.pimap = pimap;
kvm              5047 arch/powerpc/kvm/book3s_hv.c 		mutex_unlock(&kvm->lock);
kvm              5059 arch/powerpc/kvm/book3s_hv.c 				mutex_unlock(&kvm->lock);
kvm              5067 arch/powerpc/kvm/book3s_hv.c 		mutex_unlock(&kvm->lock);
kvm              5087 arch/powerpc/kvm/book3s_hv.c 		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
kvm              5089 arch/powerpc/kvm/book3s_hv.c 		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
kvm              5093 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->lock);
kvm              5098 arch/powerpc/kvm/book3s_hv.c static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
kvm              5111 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->lock);
kvm              5112 arch/powerpc/kvm/book3s_hv.c 	if (!kvm->arch.pimap)
kvm              5115 arch/powerpc/kvm/book3s_hv.c 	pimap = kvm->arch.pimap;
kvm              5123 arch/powerpc/kvm/book3s_hv.c 		mutex_unlock(&kvm->lock);
kvm              5128 arch/powerpc/kvm/book3s_hv.c 		rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
kvm              5130 arch/powerpc/kvm/book3s_hv.c 		kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
kvm              5140 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->lock);
kvm              5153 arch/powerpc/kvm/book3s_hv.c 	ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
kvm              5175 arch/powerpc/kvm/book3s_hv.c 	ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
kvm              5185 arch/powerpc/kvm/book3s_hv.c 	struct kvm *kvm __maybe_unused = filp->private_data;
kvm              5197 arch/powerpc/kvm/book3s_hv.c 		r = kvmppc_alloc_reset_hpt(kvm, htab_order);
kvm              5210 arch/powerpc/kvm/book3s_hv.c 		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
kvm              5221 arch/powerpc/kvm/book3s_hv.c 		r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
kvm              5232 arch/powerpc/kvm/book3s_hv.c 		r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
kvm              5286 arch/powerpc/kvm/book3s_hv.c static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
kvm              5317 arch/powerpc/kvm/book3s_hv.c 	mutex_lock(&kvm->arch.mmu_setup_lock);
kvm              5318 arch/powerpc/kvm/book3s_hv.c 	if (radix != kvm_is_radix(kvm)) {
kvm              5319 arch/powerpc/kvm/book3s_hv.c 		if (kvm->arch.mmu_ready) {
kvm              5320 arch/powerpc/kvm/book3s_hv.c 			kvm->arch.mmu_ready = 0;
kvm              5323 arch/powerpc/kvm/book3s_hv.c 			if (atomic_read(&kvm->arch.vcpus_running)) {
kvm              5324 arch/powerpc/kvm/book3s_hv.c 				kvm->arch.mmu_ready = 1;
kvm              5330 arch/powerpc/kvm/book3s_hv.c 			err = kvmppc_switch_mmu_to_radix(kvm);
kvm              5332 arch/powerpc/kvm/book3s_hv.c 			err = kvmppc_switch_mmu_to_hpt(kvm);
kvm              5337 arch/powerpc/kvm/book3s_hv.c 	kvm->arch.process_table = cfg->process_table;
kvm              5338 arch/powerpc/kvm/book3s_hv.c 	kvmppc_setup_partition_table(kvm);
kvm              5341 arch/powerpc/kvm/book3s_hv.c 	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
kvm              5345 arch/powerpc/kvm/book3s_hv.c 	mutex_unlock(&kvm->arch.mmu_setup_lock);
kvm              5349 arch/powerpc/kvm/book3s_hv.c static int kvmhv_enable_nested(struct kvm *kvm)
kvm              5357 arch/powerpc/kvm/book3s_hv.c 	if (kvm)
kvm              5358 arch/powerpc/kvm/book3s_hv.c 		kvm->arch.nested_enable = true;
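Aside: the kvmhv_configure_mmu() entries above show a quiesce-then-reconfigure shape: take the MMU setup mutex, clear mmu_ready to fence new guest entries, back off if any vcpu is still running, and only then switch between radix and HPT via kvmppc_switch_mmu_to_radix()/kvmppc_switch_mmu_to_hpt(). A minimal user-space sketch of that shape, assuming invented stand-in types and field names (this is not the kernel's code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct vm {
        pthread_mutex_t mmu_setup_lock;   /* serializes mode changes */
        atomic_int vcpus_running;         /* vcpus currently in their run loop */
        bool mmu_ready;                   /* cleared to fence new guest entries */
        bool radix;                       /* current translation mode */
    };

    /* Returns 0 on success, -1 if a vcpu raced in and the switch must be retried. */
    static int configure_mmu(struct vm *vm, bool want_radix)
    {
        int err = 0;

        pthread_mutex_lock(&vm->mmu_setup_lock);
        if (want_radix != vm->radix) {
            vm->mmu_ready = false;             /* fence: stop new guest entries */
            if (atomic_load(&vm->vcpus_running)) {
                vm->mmu_ready = true;          /* undo; caller retries later */
                err = -1;
            } else {
                vm->radix = want_radix;        /* safe: nothing can enter now */
            }
        }
        pthread_mutex_unlock(&vm->mmu_setup_lock);
        return err;
    }

The point of the ordering is that mmu_ready is cleared before the vcpus_running check, so a vcpu observing mmu_ready == false cannot enter the guest while the mode flips.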
kvm               211 arch/powerpc/kvm/book3s_hv_builtin.c 	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
kvm               402 arch/powerpc/kvm/book3s_hv_builtin.c 	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
kvm               806 arch/powerpc/kvm/book3s_hv_builtin.c static void flush_guest_tlb(struct kvm *kvm)
kvm               811 arch/powerpc/kvm/book3s_hv_builtin.c 	if (kvm_is_radix(kvm)) {
kvm               816 arch/powerpc/kvm/book3s_hv_builtin.c 		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
kvm               826 arch/powerpc/kvm/book3s_hv_builtin.c 		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
kvm               838 arch/powerpc/kvm/book3s_hv_builtin.c void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
kvm               855 arch/powerpc/kvm/book3s_hv_builtin.c 		need_tlb_flush = &kvm->arch.need_tlb_flush;
kvm               858 arch/powerpc/kvm/book3s_hv_builtin.c 		flush_guest_tlb(kvm);
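Aside: flush_guest_tlb() above loops over the per-mode TLB set count captured at VM init (kvm->arch.tlb_sets); the radix loop starts at set 1 while the hash loop starts at set 0, which suggests set 0 is covered by a separate initial flush in the radix case. A hedged sketch of just the loop shape, with flush_one_set() as an invented stub rather than a real tlbiel wrapper:

    /* Stand-in for the per-set TLB invalidation; not a real instruction wrapper. */
    static void flush_one_set(int set) { (void)set; }

    static void flush_guest_tlb_sketch(int tlb_sets, int radix)
    {
        for (int set = radix ? 1 : 0; set < tlb_sets; ++set)
            flush_one_set(set);
    }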
kvm               231 arch/powerpc/kvm/book3s_hv_nested.c 	if (vcpu->kvm->arch.l1_ptcr == 0)
kvm               256 arch/powerpc/kvm/book3s_hv_nested.c 	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
kvm               433 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_vm_nested_init(struct kvm *kvm)
kvm               435 arch/powerpc/kvm/book3s_hv_nested.c 	kvm->arch.max_nested_lpid = -1;
kvm               445 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm               450 arch/powerpc/kvm/book3s_hv_nested.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               456 arch/powerpc/kvm/book3s_hv_nested.c 	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
kvm               458 arch/powerpc/kvm/book3s_hv_nested.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               460 arch/powerpc/kvm/book3s_hv_nested.c 		kvm->arch.l1_ptcr = ptcr;
kvm               496 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
kvm               548 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = gp->l1_host;
kvm               551 arch/powerpc/kvm/book3s_hv_nested.c 	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
kvm               552 arch/powerpc/kvm/book3s_hv_nested.c 	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
kvm               553 arch/powerpc/kvm/book3s_hv_nested.c 		ret = kvm_read_guest(kvm, ptbl_addr,
kvm               565 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
kvm               573 arch/powerpc/kvm/book3s_hv_nested.c 	gp->l1_host = kvm;
kvm               576 arch/powerpc/kvm/book3s_hv_nested.c 	gp->shadow_pgtable = pgd_alloc(kvm->mm);
kvm               590 arch/powerpc/kvm/book3s_hv_nested.c 	pgd_free(kvm->mm, gp->shadow_pgtable);
kvm               601 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = gp->l1_host;
kvm               609 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
kvm               611 arch/powerpc/kvm/book3s_hv_nested.c 		pgd_free(kvm->mm, gp->shadow_pgtable);
kvm               620 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = gp->l1_host;
kvm               624 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               625 arch/powerpc/kvm/book3s_hv_nested.c 	if (gp == kvm->arch.nested_guests[lpid]) {
kvm               626 arch/powerpc/kvm/book3s_hv_nested.c 		kvm->arch.nested_guests[lpid] = NULL;
kvm               627 arch/powerpc/kvm/book3s_hv_nested.c 		if (lpid == kvm->arch.max_nested_lpid) {
kvm               628 arch/powerpc/kvm/book3s_hv_nested.c 			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
kvm               630 arch/powerpc/kvm/book3s_hv_nested.c 			kvm->arch.max_nested_lpid = lpid;
kvm               635 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               646 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_release_all_nested(struct kvm *kvm)
kvm               654 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               655 arch/powerpc/kvm/book3s_hv_nested.c 	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
kvm               656 arch/powerpc/kvm/book3s_hv_nested.c 		gp = kvm->arch.nested_guests[i];
kvm               659 arch/powerpc/kvm/book3s_hv_nested.c 		kvm->arch.nested_guests[i] = NULL;
kvm               665 arch/powerpc/kvm/book3s_hv_nested.c 	kvm->arch.max_nested_lpid = -1;
kvm               666 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               672 arch/powerpc/kvm/book3s_hv_nested.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               673 arch/powerpc/kvm/book3s_hv_nested.c 	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
kvm               675 arch/powerpc/kvm/book3s_hv_nested.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               681 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = gp->l1_host;
kvm               683 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               684 arch/powerpc/kvm/book3s_hv_nested.c 	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
kvm               685 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               692 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
kvm               698 arch/powerpc/kvm/book3s_hv_nested.c 	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
kvm               701 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               702 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvm->arch.nested_guests[l1_lpid];
kvm               705 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               710 arch/powerpc/kvm/book3s_hv_nested.c 	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
kvm               713 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               714 arch/powerpc/kvm/book3s_hv_nested.c 	if (kvm->arch.nested_guests[l1_lpid]) {
kvm               716 arch/powerpc/kvm/book3s_hv_nested.c 		gp = kvm->arch.nested_guests[l1_lpid];
kvm               718 arch/powerpc/kvm/book3s_hv_nested.c 		kvm->arch.nested_guests[l1_lpid] = newgp;
kvm               722 arch/powerpc/kvm/book3s_hv_nested.c 		if (l1_lpid > kvm->arch.max_nested_lpid)
kvm               723 arch/powerpc/kvm/book3s_hv_nested.c 			kvm->arch.max_nested_lpid = l1_lpid;
kvm               726 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               736 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = gp->l1_host;
kvm               739 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               741 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               746 arch/powerpc/kvm/book3s_hv_nested.c static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
kvm               748 arch/powerpc/kvm/book3s_hv_nested.c 	if (lpid > kvm->arch.max_nested_lpid)
kvm               750 arch/powerpc/kvm/book3s_hv_nested.c 	return kvm->arch.nested_guests[lpid];
kvm               759 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
kvm               791 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
kvm               802 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvmhv_find_nested(kvm, lpid);
kvm               816 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
kvm               824 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
kvm               839 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
kvm               842 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
kvm               852 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvmhv_find_nested(kvm, lpid);
kvm               860 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
kvm               863 arch/powerpc/kvm/book3s_hv_nested.c static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
kvm               871 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
kvm               877 arch/powerpc/kvm/book3s_hv_nested.c void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
kvm               895 arch/powerpc/kvm/book3s_hv_nested.c 		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
kvm               918 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm               923 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm               928 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
kvm               931 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm               976 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm               991 arch/powerpc/kvm/book3s_hv_nested.c 	gp = kvmhv_get_nested(kvm, lpid, false);
kvm              1012 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm              1018 arch/powerpc/kvm/book3s_hv_nested.c 		spin_lock(&kvm->mmu_lock);
kvm              1019 arch/powerpc/kvm/book3s_hv_nested.c 		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
kvm              1022 arch/powerpc/kvm/book3s_hv_nested.c 		spin_unlock(&kvm->mmu_lock);
kvm              1042 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm              1046 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm              1047 arch/powerpc/kvm/book3s_hv_nested.c 	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
kvm              1048 arch/powerpc/kvm/book3s_hv_nested.c 		gp = kvm->arch.nested_guests[i];
kvm              1050 arch/powerpc/kvm/book3s_hv_nested.c 			spin_unlock(&kvm->mmu_lock);
kvm              1052 arch/powerpc/kvm/book3s_hv_nested.c 			spin_lock(&kvm->mmu_lock);
kvm              1055 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm              1061 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm              1098 arch/powerpc/kvm/book3s_hv_nested.c 		gp = kvmhv_get_nested(kvm, lpid, false);
kvm              1201 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm              1213 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm              1215 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
kvm              1216 arch/powerpc/kvm/book3s_hv_nested.c 				     gpte.raddr, kvm->arch.lpid);
kvm              1223 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
kvm              1231 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm              1264 arch/powerpc/kvm/book3s_hv_nested.c 	struct kvm *kvm = vcpu->kvm;
kvm              1335 arch/powerpc/kvm/book3s_hv_nested.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm              1359 arch/powerpc/kvm/book3s_hv_nested.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm              1364 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm              1365 arch/powerpc/kvm/book3s_hv_nested.c 	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
kvm              1370 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
kvm              1417 arch/powerpc/kvm/book3s_hv_nested.c 	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
kvm              1442 arch/powerpc/kvm/book3s_hv_nested.c int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
kvm              1446 arch/powerpc/kvm/book3s_hv_nested.c 	spin_lock(&kvm->mmu_lock);
kvm              1447 arch/powerpc/kvm/book3s_hv_nested.c 	while (++lpid <= kvm->arch.max_nested_lpid) {
kvm              1448 arch/powerpc/kvm/book3s_hv_nested.c 		if (kvm->arch.nested_guests[lpid]) {
kvm              1453 arch/powerpc/kvm/book3s_hv_nested.c 	spin_unlock(&kvm->mmu_lock);
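Aside: the book3s_hv_nested.c entries above imply an lpid-indexed table of nested guests plus a cached maximum index, all mutated under one lock: kvmhv_get_nested() inserts and raises max_nested_lpid, removal shrinks it past trailing empty slots, kvmhv_find_nested() bounds-checks against it, and kvmhv_nested_next_lpid() iterates with it. A standalone sketch of that data-structure discipline, with invented names and sizes:

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_NESTED 1024

    struct nested { int lpid; };

    struct vm {
        pthread_mutex_t lock;              /* stands in for kvm->mmu_lock */
        struct nested *guests[MAX_NESTED]; /* indexed by guest-visible lpid */
        int max_lpid;                      /* -1 when the table is empty */
    };

    static void remove_nested(struct vm *vm, int lpid)
    {
        pthread_mutex_lock(&vm->lock);
        vm->guests[lpid] = NULL;
        /* shrink the cached maximum past any trailing empty slots */
        if (lpid == vm->max_lpid) {
            while (--lpid >= 0 && !vm->guests[lpid])
                ;
            vm->max_lpid = lpid;
        }
        pthread_mutex_unlock(&vm->lock);
    }

    /* Iteration helper in the style of kvmhv_nested_next_lpid(). */
    static int next_lpid(struct vm *vm, int lpid)
    {
        int ret = -1;

        pthread_mutex_lock(&vm->lock);
        while (++lpid <= vm->max_lpid) {
            if (vm->guests[lpid]) {
                ret = lpid;
                break;
            }
        }
        pthread_mutex_unlock(&vm->lock);
        return ret;
    }

Caching max_lpid keeps full-table walks (release-all, next-lpid) bounded by the highest lpid ever used rather than the table capacity.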
kvm                86 arch/powerpc/kvm/book3s_hv_ras.c 			tlbiel_all_lpid(vcpu->kvm->arch.radix);
kvm               103 arch/powerpc/kvm/book3s_hv_ras.c 		tlbiel_all_lpid(vcpu->kvm->arch.radix);
kvm                43 arch/powerpc/kvm/book3s_hv_rm_mmu.c static int global_invalidates(struct kvm *kvm)
kvm                55 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
kvm                63 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		cpumask_setall(&kvm->arch.need_tlb_flush);
kvm                71 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
kvm                81 arch/powerpc/kvm/book3s_hv_rm_mmu.c void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
kvm                89 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		head = &kvm->arch.hpt.rev[i];
kvm                92 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		tail = &kvm->arch.hpt.rev[head->back];
kvm               122 arch/powerpc/kvm/book3s_hv_rm_mmu.c static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
kvm               131 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
kvm               137 arch/powerpc/kvm/book3s_hv_rm_mmu.c static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
kvm               147 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
kvm               160 arch/powerpc/kvm/book3s_hv_rm_mmu.c static void remove_revmap_chain(struct kvm *kvm, long pte_index,
kvm               173 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
kvm               179 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
kvm               180 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
kvm               197 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
kvm               215 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               226 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               232 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
kvm               317 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               321 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               352 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               369 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = &kvm->arch.hpt.rev[pte_index];
kvm               374 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		note_hpte_modification(kvm, rev);
kvm               383 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		if (mmu_notifier_retry(kvm, mmu_seq)) {
kvm               390 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
kvm               418 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
kvm               467 arch/powerpc/kvm/book3s_hv_rm_mmu.c static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
kvm               482 arch/powerpc/kvm/book3s_hv_rm_mmu.c 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
kvm               485 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
kvm               498 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
kvm               507 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               509 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               511 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               527 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               532 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
kvm               542 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		remove_revmap_chain(kvm, pte_index, rev, v,
kvm               546 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	note_hpte_modification(kvm, rev);
kvm               550 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		atomic64_inc(&kvm->arch.mmio_update);
kvm               563 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
kvm               569 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               580 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               582 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	global = global_invalidates(kvm);
kvm               597 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
kvm               603 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
kvm               640 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               641 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			note_hpte_modification(kvm, rev);
kvm               649 arch/powerpc/kvm/book3s_hv_rm_mmu.c 					atomic64_inc(&kvm->arch.mmio_update);
kvm               666 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		do_tlbies(kvm, tlbrb, n, global, true);
kvm               674 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			remove_revmap_chain(kvm, pte_index, rev,
kvm               689 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               695 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               697 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               700 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               720 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               724 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		note_hpte_modification(kvm, rev);
kvm               742 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
kvm               751 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		atomic64_inc(&kvm->arch.mmio_update);
kvm               759 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               765 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               767 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               773 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               775 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               799 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               806 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               808 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               811 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               812 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               823 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		note_hpte_modification(kvm, rev);
kvm               828 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
kvm               829 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
kvm               847 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               853 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm               855 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
kvm               858 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
kvm               859 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
kvm               870 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		note_hpte_modification(kvm, rev);
kvm               875 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
kvm               881 arch/powerpc/kvm/book3s_hv_rm_mmu.c 			kvmppc_set_dirty_from_hpte(kvm, v, gr);
kvm               895 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               903 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
kvm               937 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               943 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               951 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	raw_spin_lock(&kvm->mmu_lock.rlock);
kvm               952 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (mmu_notifier_retry(kvm, mmu_seq)) {
kvm               963 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	raw_spin_unlock(&kvm->mmu_lock.rlock);
kvm               972 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm               976 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               987 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	raw_spin_lock(&kvm->mmu_lock.rlock);
kvm               988 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (mmu_notifier_retry(kvm, mmu_seq)) {
kvm               999 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	raw_spin_unlock(&kvm->mmu_lock.rlock);
kvm              1006 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm              1011 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	if (kvm_is_radix(kvm))
kvm              1034 arch/powerpc/kvm/book3s_hv_rm_mmu.c void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
kvm              1048 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	do_tlbies(kvm, &rb, 1, 1, true);
kvm              1052 arch/powerpc/kvm/book3s_hv_rm_mmu.c void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
kvm              1069 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	do_tlbies(kvm, &rb, 1, 1, false);
kvm              1115 arch/powerpc/kvm/book3s_hv_rm_mmu.c long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
kvm              1144 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
kvm              1155 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
kvm              1191 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
kvm              1211 arch/powerpc/kvm/book3s_hv_rm_mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm              1225 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		mmio_update = atomic64_read(&kvm->arch.mmio_update);
kvm              1234 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
kvm              1240 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
kvm              1247 arch/powerpc/kvm/book3s_hv_rm_mmu.c 		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
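Aside: kvmppc_add_revmap_chain() and remove_revmap_chain() above manipulate hpt.rev[i].forw/.back, i.e. a circular doubly linked list threaded through array indices rather than pointers. A self-contained sketch of that list discipline, with invented names (illustrative only, not the kernel's revmap_entry layout):

    struct rev_entry {
        unsigned forw, back;   /* indices into the same array */
    };

    /* Insert slot i at the tail of the circular chain headed at head_idx. */
    static void chain_insert(struct rev_entry *rev, unsigned head_idx, unsigned i)
    {
        struct rev_entry *head = &rev[head_idx];
        struct rev_entry *tail = &rev[head->back];

        rev[i].forw = head_idx;
        rev[i].back = head->back;
        tail->forw = i;
        head->back = i;
    }

    /* Unlink slot i; a single-element chain points at itself. */
    static void chain_remove(struct rev_entry *rev, unsigned i)
    {
        rev[rev[i].back].forw = rev[i].forw;
        rev[rev[i].forw].back = rev[i].back;
        rev[i].forw = rev[i].back = i;
    }

Linking by index rather than pointer means an entry can be named by its position in the rev array alone, which fits code (like the real-mode paths above) that recomputes usable addresses with real_vmalloc_addr() before touching each entry.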
kvm               322 arch/powerpc/kvm/book3s_hv_rm_xics.c 		icp = kvmppc_xics_find_server(xics->kvm, state->server);
kvm               493 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               532 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               545 arch/powerpc/kvm/book3s_hv_rm_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
kvm               618 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               679 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               711 arch/powerpc/kvm/book3s_hv_rm_xics.c 	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
kvm               737 arch/powerpc/kvm/book3s_hv_rm_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               868 arch/powerpc/kvm/book3s_hv_rm_xics.c 	xics = vcpu->kvm->arch.xics;
kvm               392 arch/powerpc/kvm/book3s_pr.c static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
kvm               400 arch/powerpc/kvm/book3s_pr.c 	slots = kvm_memslots(kvm);
kvm               416 arch/powerpc/kvm/book3s_pr.c 		kvm_for_each_vcpu(i, vcpu, kvm)
kvm               422 arch/powerpc/kvm/book3s_pr.c static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
kvm               425 arch/powerpc/kvm/book3s_pr.c 	do_kvm_unmap_hva(kvm, start, end);
kvm               430 arch/powerpc/kvm/book3s_pr.c static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
kvm               437 arch/powerpc/kvm/book3s_pr.c static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
kvm               443 arch/powerpc/kvm/book3s_pr.c static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm               446 arch/powerpc/kvm/book3s_pr.c 	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
kvm               632 arch/powerpc/kvm/book3s_pr.c 	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
kvm               664 arch/powerpc/kvm/book3s_pr.c 	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm              1164 arch/powerpc/kvm/book3s_pr.c 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1166 arch/powerpc/kvm/book3s_pr.c 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1214 arch/powerpc/kvm/book3s_pr.c 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1216 arch/powerpc/kvm/book3s_pr.c 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1711 arch/powerpc/kvm/book3s_pr.c static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
kvm              1735 arch/powerpc/kvm/book3s_pr.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm              1862 arch/powerpc/kvm/book3s_pr.c static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
kvm              1873 arch/powerpc/kvm/book3s_pr.c 	mutex_lock(&kvm->slots_lock);
kvm              1875 arch/powerpc/kvm/book3s_pr.c 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
kvm              1881 arch/powerpc/kvm/book3s_pr.c 		slots = kvm_memslots(kvm);
kvm              1887 arch/powerpc/kvm/book3s_pr.c 		kvm_for_each_vcpu(n, vcpu, kvm)
kvm              1896 arch/powerpc/kvm/book3s_pr.c 	mutex_unlock(&kvm->slots_lock);
kvm              1900 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
kvm              1906 arch/powerpc/kvm/book3s_pr.c static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
kvm              1913 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
kvm              1936 arch/powerpc/kvm/book3s_pr.c static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
kvm              1963 arch/powerpc/kvm/book3s_pr.c 	vcpu = kvm_get_vcpu(kvm, 0);
kvm              1982 arch/powerpc/kvm/book3s_pr.c static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
kvm              1993 arch/powerpc/kvm/book3s_pr.c static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
kvm              2004 arch/powerpc/kvm/book3s_pr.c static int kvmppc_core_init_vm_pr(struct kvm *kvm)
kvm              2006 arch/powerpc/kvm/book3s_pr.c 	mutex_init(&kvm->arch.hpt_mutex);
kvm              2010 arch/powerpc/kvm/book3s_pr.c 	kvmppc_pr_init_default_hcalls(kvm);
kvm              2022 arch/powerpc/kvm/book3s_pr.c static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
kvm              2025 arch/powerpc/kvm/book3s_pr.c 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
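Aside: do_kvm_unmap_hva() in the book3s_pr.c entries above walks every memslot and converts the host-virtual invalidation range into guest frame numbers before flushing. A hedged sketch of that HVA-to-GFN clamping, with invented, simplified types:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct memslot {
        uint64_t base_gfn;        /* first guest frame of the slot */
        uint64_t npages;
        uint64_t userspace_addr;  /* HVA where the slot is mapped */
    };

    typedef void (*unmap_fn)(uint64_t gfn);

    static void unmap_hva_range(struct memslot *slots, int nslots,
                                uint64_t start, uint64_t end, unmap_fn unmap)
    {
        for (int i = 0; i < nslots; i++) {
            struct memslot *m = &slots[i];
            uint64_t hva_lo = m->userspace_addr;
            uint64_t hva_hi = hva_lo + (m->npages << PAGE_SHIFT);
            /* clamp the invalidation range against this slot's mapping */
            uint64_t lo = start > hva_lo ? start : hva_lo;
            uint64_t hi = end < hva_hi ? end : hva_hi;

            for (uint64_t hva = lo; hva < hi; hva += PAGE_SIZE)
                unmap(m->base_gfn + ((hva - hva_lo) >> PAGE_SHIFT));
        }
    }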
kvm                49 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
kvm                80 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm                96 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
kvm               121 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm               151 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
kvm               208 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm               224 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
kvm               256 arch/powerpc/kvm/book3s_pr_papr.c 	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
kvm               359 arch/powerpc/kvm/book3s_pr_papr.c 	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
kvm               397 arch/powerpc/kvm/book3s_pr_papr.c 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
kvm               399 arch/powerpc/kvm/book3s_pr_papr.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               401 arch/powerpc/kvm/book3s_pr_papr.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm               461 arch/powerpc/kvm/book3s_pr_papr.c void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
kvm               469 arch/powerpc/kvm/book3s_pr_papr.c 		__set_bit(hcall / 4, kvm->arch.enabled_hcalls);
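Aside: both the hcall gate above (test_bit(cmd/4, ...enabled_hcalls)) and kvmppc_pr_init_default_hcalls() (__set_bit(hcall / 4, ...)) divide the hypercall token by 4: PAPR hcall numbers are multiples of 4, so token/4 yields a dense bit index. A freestanding sketch of the same bitmap, with simplified helpers standing in for the kernel's bitops:

    #include <limits.h>
    #include <stdbool.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define MAX_HCALL     0x450   /* illustrative upper bound, not the kernel's */
    #define HCALL_WORDS   ((MAX_HCALL / 4) / BITS_PER_LONG + 1)

    static unsigned long enabled_hcalls[HCALL_WORDS];

    static void enable_hcall(unsigned token)
    {
        enabled_hcalls[(token / 4) / BITS_PER_LONG] |=
            1UL << ((token / 4) % BITS_PER_LONG);
    }

    static bool hcall_enabled(unsigned token)
    {
        return enabled_hcalls[(token / 4) / BITS_PER_LONG] &
               (1UL << ((token / 4) % BITS_PER_LONG));
    }

Dividing by 4 quarters the bitmap size at no cost, since the low two bits of a valid token carry no information.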
kvm                34 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
kvm                36 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
kvm                57 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
kvm                59 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
kvm                84 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_int_off(vcpu->kvm, irq);
kvm                86 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_int_off(vcpu->kvm, irq);
kvm               106 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xive_int_on(vcpu->kvm, irq);
kvm               108 arch/powerpc/kvm/book3s_rtas.c 		rc = kvmppc_xics_int_on(vcpu->kvm, irq);
kvm               142 arch/powerpc/kvm/book3s_rtas.c static int rtas_token_undefine(struct kvm *kvm, char *name)
kvm               146 arch/powerpc/kvm/book3s_rtas.c 	lockdep_assert_held(&kvm->arch.rtas_token_lock);
kvm               148 arch/powerpc/kvm/book3s_rtas.c 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
kvm               160 arch/powerpc/kvm/book3s_rtas.c static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
kvm               167 arch/powerpc/kvm/book3s_rtas.c 	lockdep_assert_held(&kvm->arch.rtas_token_lock);
kvm               169 arch/powerpc/kvm/book3s_rtas.c 	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
kvm               193 arch/powerpc/kvm/book3s_rtas.c 	list_add_tail(&d->list, &kvm->arch.rtas_tokens);
kvm               198 arch/powerpc/kvm/book3s_rtas.c int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
kvm               206 arch/powerpc/kvm/book3s_rtas.c 	mutex_lock(&kvm->arch.rtas_token_lock);
kvm               209 arch/powerpc/kvm/book3s_rtas.c 		rc = rtas_token_define(kvm, args.name, args.token);
kvm               211 arch/powerpc/kvm/book3s_rtas.c 		rc = rtas_token_undefine(kvm, args.name);
kvm               213 arch/powerpc/kvm/book3s_rtas.c 	mutex_unlock(&kvm->arch.rtas_token_lock);
kvm               232 arch/powerpc/kvm/book3s_rtas.c 	rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
kvm               245 arch/powerpc/kvm/book3s_rtas.c 	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
kvm               248 arch/powerpc/kvm/book3s_rtas.c 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
kvm               256 arch/powerpc/kvm/book3s_rtas.c 	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
kvm               260 arch/powerpc/kvm/book3s_rtas.c 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
kvm               278 arch/powerpc/kvm/book3s_rtas.c void kvmppc_rtas_tokens_free(struct kvm *kvm)
kvm               282 arch/powerpc/kvm/book3s_rtas.c 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
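Aside: the rtas_token_define()/rtas_token_undefine() entries above maintain a mutex-guarded list mapping RTAS service names to guest-assigned token values, redefining in place when a name is already present. A small user-space sketch of that list discipline, assuming invented names and sizes:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct rtas_token {
        struct rtas_token *next;
        char name[64];
        unsigned long token;
    };

    static struct rtas_token *tokens;
    static pthread_mutex_t token_lock = PTHREAD_MUTEX_INITIALIZER;

    static int token_define(const char *name, unsigned long token)
    {
        struct rtas_token *d;

        pthread_mutex_lock(&token_lock);
        for (d = tokens; d; d = d->next) {
            if (!strcmp(d->name, name)) {  /* known name: redefine in place */
                d->token = token;
                goto out;
            }
        }
        d = calloc(1, sizeof(*d));
        if (!d) {
            pthread_mutex_unlock(&token_lock);
            return -1;
        }
        strncpy(d->name, name, sizeof(d->name) - 1);
        d->token = token;
        d->next = tokens;
        tokens = d;
    out:
        pthread_mutex_unlock(&token_lock);
        return 0;
    }

The same lock then serializes lookups during hcall dispatch, matching the rtas_token_lock usage visible in the listing.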
kvm               166 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
kvm               168 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm               182 arch/powerpc/kvm/book3s_xics.c 	icp = kvmppc_xics_find_server(kvm, server);
kvm               196 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
kvm               198 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm               222 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
kvm               224 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm               238 arch/powerpc/kvm/book3s_xics.c 	icp = kvmppc_xics_find_server(kvm, state->server);
kvm               249 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
kvm               251 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm               416 arch/powerpc/kvm/book3s_xics.c 		icp = kvmppc_xics_find_server(xics->kvm, state->server);
kvm               623 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               635 arch/powerpc/kvm/book3s_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
kvm               711 arch/powerpc/kvm/book3s_xics.c 		icp = kvmppc_xics_find_server(vcpu->kvm, server);
kvm               724 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               779 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               812 arch/powerpc/kvm/book3s_xics.c 	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
kvm               819 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               850 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               866 arch/powerpc/kvm/book3s_xics.c 		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
kvm               877 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm               898 arch/powerpc/kvm/book3s_xics.c 	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
kvm               943 arch/powerpc/kvm/book3s_xics.c 	struct kvm *kvm = xics->kvm;
kvm               951 arch/powerpc/kvm/book3s_xics.c 	if (!kvm)
kvm               960 arch/powerpc/kvm/book3s_xics.c 	xics_debugfs_irqmap(m, kvm->arch.pimap);
kvm               964 arch/powerpc/kvm/book3s_xics.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1034 arch/powerpc/kvm/book3s_xics.c static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
kvm              1042 arch/powerpc/kvm/book3s_xics.c 	mutex_lock(&kvm->lock);
kvm              1067 arch/powerpc/kvm/book3s_xics.c 	mutex_unlock(&kvm->lock);
kvm              1075 arch/powerpc/kvm/book3s_xics.c 	if (!vcpu->kvm->arch.xics)
kvm              1078 arch/powerpc/kvm/book3s_xics.c 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
kvm              1113 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
kvm              1251 arch/powerpc/kvm/book3s_xics.c 		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
kvm              1262 arch/powerpc/kvm/book3s_xics.c 	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
kvm              1293 arch/powerpc/kvm/book3s_xics.c int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
kvm              1296 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm              1341 arch/powerpc/kvm/book3s_xics.c 	struct kvm *kvm = xics->kvm;
kvm              1345 arch/powerpc/kvm/book3s_xics.c 	if (kvm)
kvm              1346 arch/powerpc/kvm/book3s_xics.c 		kvm->arch.xics = NULL;
kvm              1357 arch/powerpc/kvm/book3s_xics.c 	struct kvm *kvm = dev->kvm;
kvm              1366 arch/powerpc/kvm/book3s_xics.c 	xics->kvm = kvm;
kvm              1369 arch/powerpc/kvm/book3s_xics.c 	if (kvm->arch.xics)
kvm              1372 arch/powerpc/kvm/book3s_xics.c 		kvm->arch.xics = xics;
kvm              1416 arch/powerpc/kvm/book3s_xics.c 	if (xics->kvm != vcpu->kvm)
kvm              1437 arch/powerpc/kvm/book3s_xics.c void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
kvm              1440 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm              1453 arch/powerpc/kvm/book3s_xics.c void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
kvm              1456 arch/powerpc/kvm/book3s_xics.c 	struct kvmppc_xics *xics = kvm->arch.xics;
kvm               104 arch/powerpc/kvm/book3s_xics.h 	struct kvm *kvm;
kvm               115 arch/powerpc/kvm/book3s_xics.h static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm,
kvm               121 arch/powerpc/kvm/book3s_xics.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               197 arch/powerpc/kvm/book3s_xive.c 				 vcpu->kvm->arch.lpid, xc->server_num);
kvm               200 arch/powerpc/kvm/book3s_xive.c 				 vcpu->kvm->arch.lpid, xc->server_num, prio);
kvm               281 arch/powerpc/kvm/book3s_xive.c static int xive_check_provisioning(struct kvm *kvm, u8 prio)
kvm               283 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               296 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               313 arch/powerpc/kvm/book3s_xive.c static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
kvm               320 arch/powerpc/kvm/book3s_xive.c 	vcpu = kvmppc_xive_find_server(kvm, server);
kvm               353 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
kvm               359 arch/powerpc/kvm/book3s_xive.c 	vcpu = kvmppc_xive_find_server(kvm, *server);
kvm               375 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               528 arch/powerpc/kvm/book3s_xive.c static int xive_target_interrupt(struct kvm *kvm,
kvm               532 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               541 arch/powerpc/kvm/book3s_xive.c 	rc = kvmppc_xive_select_target(kvm, &server, prio);
kvm               556 arch/powerpc/kvm/book3s_xive.c 		xive_inc_q_pending(kvm,
kvm               613 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
kvm               616 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               632 arch/powerpc/kvm/book3s_xive.c 		rc = xive_check_provisioning(xive->kvm,
kvm               695 arch/powerpc/kvm/book3s_xive.c 		rc = xive_target_interrupt(kvm, state, server, new_act_prio);
kvm               714 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
kvm               717 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               737 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
kvm               739 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               776 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
kvm               778 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               840 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
kvm               896 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               899 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               955 arch/powerpc/kvm/book3s_xive.c 		xive->ops->reset_mapped(kvm, guest_irq);
kvm               993 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
kvm               996 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm              1047 arch/powerpc/kvm/book3s_xive.c 		xive->ops->reset_mapped(kvm, guest_irq);
kvm              1077 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = vcpu->kvm;
kvm              1078 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm              1153 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
kvm              1228 arch/powerpc/kvm/book3s_xive.c 	if (xive->kvm != vcpu->kvm)
kvm              1232 arch/powerpc/kvm/book3s_xive.c 	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
kvm              1241 arch/powerpc/kvm/book3s_xive.c 	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
kvm              1454 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
kvm              1618 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = xive->kvm;
kvm              1622 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1723 arch/powerpc/kvm/book3s_xive.c 		rc = xive_check_provisioning(xive->kvm, act_prio);
kvm              1728 arch/powerpc/kvm/book3s_xive.c 			rc = xive_target_interrupt(xive->kvm, state,
kvm              1800 arch/powerpc/kvm/book3s_xive.c int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
kvm              1803 arch/powerpc/kvm/book3s_xive.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm              1908 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = xive->kvm;
kvm              1928 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1948 arch/powerpc/kvm/book3s_xive.c 	kvm->arch.xive = NULL;
kvm              1980 arch/powerpc/kvm/book3s_xive.c struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
kvm              1983 arch/powerpc/kvm/book3s_xive.c 		&kvm->arch.xive_devices.native :
kvm              1984 arch/powerpc/kvm/book3s_xive.c 		&kvm->arch.xive_devices.xics_on_xive;
kvm              2003 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = dev->kvm;
kvm              2009 arch/powerpc/kvm/book3s_xive.c 	if (kvm->arch.xive)
kvm              2012 arch/powerpc/kvm/book3s_xive.c 	xive = kvmppc_xive_get_device(kvm, type);
kvm              2018 arch/powerpc/kvm/book3s_xive.c 	xive->kvm = kvm;
kvm              2040 arch/powerpc/kvm/book3s_xive.c 	kvm->arch.xive = xive;
kvm              2085 arch/powerpc/kvm/book3s_xive.c 	struct kvm *kvm = xive->kvm;
kvm              2099 arch/powerpc/kvm/book3s_xive.c 	if (!kvm)
kvm              2104 arch/powerpc/kvm/book3s_xive.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
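The book3s_xive.c entries above share one prologue: resolve the per-VM device from kvm->arch.xive and bail out when it was never created. A minimal sketch of that shape; example_xive_op is an illustrative name, not a function from the file:

    /* hedged sketch: the lookup-and-bail prologue common to the
     * kvmppc_xive_* entry points listed above */
    int example_xive_op(struct kvm *kvm, u32 irq)
    {
            struct kvmppc_xive *xive = kvm->arch.xive;

            if (!xive)              /* no XIVE device on this VM */
                    return -ENODEV;
            /* ... look up the source state for irq and operate on it ... */
            return 0;
    }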
kvm                97 arch/powerpc/kvm/book3s_xive.h 	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
kvm               101 arch/powerpc/kvm/book3s_xive.h 	struct kvm *kvm;
kvm               193 arch/powerpc/kvm/book3s_xive.h static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
kvm               198 arch/powerpc/kvm/book3s_xive.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               220 arch/powerpc/kvm/book3s_xive.h 	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
kvm               223 arch/powerpc/kvm/book3s_xive.h static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
kvm               228 arch/powerpc/kvm/book3s_xive.h 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               293 arch/powerpc/kvm/book3s_xive.h int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
kvm               296 arch/powerpc/kvm/book3s_xive.h struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
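book3s_xive.h's kvmppc_xive_find_server, shown above, scans the vcpus with kvm_for_each_vcpu. A plausible completion of that inline, assuming the per-vcpu XIVE state hangs off vcpu->arch.xive_vcpu and carries a server_num field:

    static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm,
                                                           u32 nr)
    {
            struct kvm_vcpu *vcpu = NULL;
            int i;

            kvm_for_each_vcpu(i, vcpu, kvm) {
                    /* match on the guest-visible server number */
                    if (vcpu->arch.xive_vcpu &&
                        nr == vcpu->arch.xive_vcpu->server_num)
                            return vcpu;
            }
            return NULL;
    }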
kvm               135 arch/powerpc/kvm/book3s_xive_native.c 	if (xive->kvm != vcpu->kvm)
kvm               139 arch/powerpc/kvm/book3s_xive_native.c 	if (server_num >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
kvm               147 arch/powerpc/kvm/book3s_xive_native.c 	if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
kvm               200 arch/powerpc/kvm/book3s_xive_native.c static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
kvm               202 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive *xive = kvm->arch.xive;
kvm               414 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm               431 arch/powerpc/kvm/book3s_xive_native.c 		rc = kvmppc_xive_select_target(kvm, &server, priority);
kvm               554 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm               580 arch/powerpc/kvm/book3s_xive_native.c 	vcpu = kvmppc_xive_find_server(kvm, server);
kvm               638 arch/powerpc/kvm/book3s_xive_native.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm               643 arch/powerpc/kvm/book3s_xive_native.c 		srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               648 arch/powerpc/kvm/book3s_xive_native.c 	page = gfn_to_page(kvm, gfn);
kvm               650 arch/powerpc/kvm/book3s_xive_native.c 		srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               656 arch/powerpc/kvm/book3s_xive_native.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm               702 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm               725 arch/powerpc/kvm/book3s_xive_native.c 	vcpu = kvmppc_xive_find_server(kvm, server);
kvm               799 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm               807 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               899 arch/powerpc/kvm/book3s_xive_native.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               900 arch/powerpc/kvm/book3s_xive_native.c 		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
kvm               901 arch/powerpc/kvm/book3s_xive_native.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm               908 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm               925 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1006 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm              1035 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1055 arch/powerpc/kvm/book3s_xive_native.c 	kvm->arch.xive = NULL;
kvm              1083 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = dev->kvm;
kvm              1088 arch/powerpc/kvm/book3s_xive_native.c 	if (kvm->arch.xive)
kvm              1091 arch/powerpc/kvm/book3s_xive_native.c 	xive = kvmppc_xive_get_device(kvm, type);
kvm              1097 arch/powerpc/kvm/book3s_xive_native.c 	xive->kvm = kvm;
kvm              1118 arch/powerpc/kvm/book3s_xive_native.c 	kvm->arch.xive = xive;
kvm              1169 arch/powerpc/kvm/book3s_xive_native.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
kvm              1206 arch/powerpc/kvm/book3s_xive_native.c 	struct kvm *kvm = xive->kvm;
kvm              1210 arch/powerpc/kvm/book3s_xive_native.c 	if (!kvm)
kvm              1215 arch/powerpc/kvm/book3s_xive_native.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
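Several book3s_xive_native.c entries above bracket gfn_to_page() with srcu_read_lock(&kvm->srcu), which pins the memslot layout while a guest page is looked up. A sketch of that pattern for mapping one guest queue page; example_map_queue_page is an illustrative name, and error handling is reduced to returning NULL:

    static void *example_map_queue_page(struct kvm *kvm, gpa_t gpa)
    {
            struct page *page;
            gfn_t gfn = gpa_to_gfn(gpa);
            int srcu_idx;
            void *qaddr;

            srcu_idx = srcu_read_lock(&kvm->srcu);  /* pins memslots */
            page = gfn_to_page(kvm, gfn);           /* takes a page ref */
            if (is_error_page(page)) {
                    srcu_read_unlock(&kvm->srcu, srcu_idx);
                    return NULL;
            }
            qaddr = page_to_virt(page) + (gpa & ~PAGE_MASK);
            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return qaddr;
    }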
kvm               332 arch/powerpc/kvm/book3s_xive_template.c 		vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
kvm               442 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
kvm               500 arch/powerpc/kvm/book3s_xive_template.c 	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
kvm               613 arch/powerpc/kvm/book3s_xive_template.c 	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
kvm                38 arch/powerpc/kvm/booke.c #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
kvm              1265 arch/powerpc/kvm/booke.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1270 arch/powerpc/kvm/booke.c 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
kvm              1289 arch/powerpc/kvm/booke.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1313 arch/powerpc/kvm/booke.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1318 arch/powerpc/kvm/booke.c 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
kvm              1331 arch/powerpc/kvm/booke.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1623 arch/powerpc/kvm/booke.c 	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
kvm              1646 arch/powerpc/kvm/booke.c 	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
kvm              1702 arch/powerpc/kvm/booke.c 		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
kvm              1771 arch/powerpc/kvm/booke.c 		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
kvm              1799 arch/powerpc/kvm/booke.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
kvm              1804 arch/powerpc/kvm/booke.c void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm              1809 arch/powerpc/kvm/booke.c int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm              1815 arch/powerpc/kvm/booke.c int kvmppc_core_prepare_memory_region(struct kvm *kvm,
kvm              1822 arch/powerpc/kvm/booke.c void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm              1830 arch/powerpc/kvm/booke.c void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
kvm              2109 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
kvm              2112 arch/powerpc/kvm/booke.c int kvmppc_core_init_vm(struct kvm *kvm)
kvm              2114 arch/powerpc/kvm/booke.c 	return kvm->arch.kvm_ops->init_vm(kvm);
kvm              2117 arch/powerpc/kvm/booke.c struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
kvm              2119 arch/powerpc/kvm/booke.c 	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
kvm              2124 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
kvm              2127 arch/powerpc/kvm/booke.c void kvmppc_core_destroy_vm(struct kvm *kvm)
kvm              2129 arch/powerpc/kvm/booke.c 	kvm->arch.kvm_ops->destroy_vm(kvm);
kvm              2134 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
kvm              2139 arch/powerpc/kvm/booke.c 	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
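booke.c is common code for the e500 and e500mc flavours; nearly every hook listed above is a one-line forward through the per-VM ops table. Filling in the bodies the index already shows:

    /* common code picks up whichever struct kvmppc_ops was installed
     * at VM-creation time and simply indirects through it */
    int kvmppc_core_init_vm(struct kvm *kvm)
    {
            return kvm->arch.kvm_ops->init_vm(kvm);
    }

    void kvmppc_core_destroy_vm(struct kvm *kvm)
    {
            kvm->arch.kvm_ops->destroy_vm(kvm);
    }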
kvm               436 arch/powerpc/kvm/e500.c static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
kvm               453 arch/powerpc/kvm/e500.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm               497 arch/powerpc/kvm/e500.c static int kvmppc_core_init_vm_e500(struct kvm *kvm)
kvm               502 arch/powerpc/kvm/e500.c static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
kvm               268 arch/powerpc/kvm/e500.h 	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
kvm               307 arch/powerpc/kvm/e500.h 	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
kvm                74 arch/powerpc/kvm/e500_emulate.c 	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
kvm               431 arch/powerpc/kvm/e500_mmu.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm               447 arch/powerpc/kvm/e500_mmu.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm               131 arch/powerpc/kvm/e500_mmu_host.c 		__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
kvm               136 arch/powerpc/kvm/e500_mmu_host.c 				  vcpu_e500->vcpu.kvm->arch.lpid);
kvm               334 arch/powerpc/kvm/e500_mmu_host.c 	struct kvm *kvm = vcpu_e500->vcpu.kvm;
kvm               342 arch/powerpc/kvm/e500_mmu_host.c 	mmu_seq = kvm->mmu_notifier_seq;
kvm               353 arch/powerpc/kvm/e500_mmu_host.c 	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
kvm               462 arch/powerpc/kvm/e500_mmu_host.c 	spin_lock(&kvm->mmu_lock);
kvm               463 arch/powerpc/kvm/e500_mmu_host.c 	if (mmu_notifier_retry(kvm, mmu_seq)) {
kvm               502 arch/powerpc/kvm/e500_mmu_host.c 	spin_unlock(&kvm->mmu_lock);
kvm               724 arch/powerpc/kvm/e500_mmu_host.c static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
kvm               732 arch/powerpc/kvm/e500_mmu_host.c 	kvm_flush_remote_tlbs(kvm);
kvm               737 arch/powerpc/kvm/e500_mmu_host.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               740 arch/powerpc/kvm/e500_mmu_host.c 	kvm_unmap_hva(kvm, start);
kvm               745 arch/powerpc/kvm/e500_mmu_host.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
kvm               751 arch/powerpc/kvm/e500_mmu_host.c int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
kvm               757 arch/powerpc/kvm/e500_mmu_host.c int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm               760 arch/powerpc/kvm/e500_mmu_host.c 	kvm_unmap_hva(kvm, hva);
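The e500_mmu_host.c entries above show the standard MMU-notifier race check: snapshot kvm->mmu_notifier_seq before translating, then re-check under kvm->mmu_lock before installing the mapping. A self-contained sketch; example_install_translation and the gfn_to_pfn_memslot() lookup are illustrative stand-ins for the real shadow-map logic:

    static int example_install_translation(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t gfn)
    {
            unsigned long mmu_seq;
            kvm_pfn_t pfn;

            mmu_seq = kvm->mmu_notifier_seq;
            smp_rmb();              /* pairs with the notifier write side */

            pfn = gfn_to_pfn_memslot(slot, gfn);
            if (is_error_noslot_pfn(pfn))
                    return -EFAULT;

            spin_lock(&kvm->mmu_lock);
            if (mmu_notifier_retry(kvm, mmu_seq)) {
                    /* an invalidation ran in between: drop the pfn and
                     * let the caller fault again */
                    spin_unlock(&kvm->mmu_lock);
                    kvm_release_pfn_clean(pfn);
                    return -EAGAIN;
            }
            /* ... write the shadow TLB entry here ... */
            spin_unlock(&kvm->mmu_lock);
            kvm_release_pfn_clean(pfn);
            return 0;
    }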
kvm               304 arch/powerpc/kvm/e500mc.c static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
kvm               321 arch/powerpc/kvm/e500mc.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm               358 arch/powerpc/kvm/e500mc.c static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
kvm               374 arch/powerpc/kvm/e500mc.c 	kvm->arch.lpid = lpid;
kvm               378 arch/powerpc/kvm/e500mc.c static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
kvm               380 arch/powerpc/kvm/e500mc.c 	int lpid = kvm->arch.lpid;
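e500mc.c's init/destroy pair above stores an lpid in kvm->arch.lpid and reads it back on teardown. A minimal sketch of that lifecycle, assuming the kvmppc_alloc_lpid()/kvmppc_free_lpid() allocator pair and ignoring the threaded-core details; example_init_vm/example_destroy_vm are illustrative names:

    static int example_init_vm(struct kvm *kvm)
    {
            int lpid = kvmppc_alloc_lpid();   /* may fail with -ENOMEM */

            if (lpid < 0)
                    return lpid;
            kvm->arch.lpid = lpid;
            return 0;
    }

    static void example_destroy_vm(struct kvm *kvm)
    {
            kvmppc_free_lpid(kvm->arch.lpid);
    }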
kvm               115 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
kvm               176 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
kvm               288 arch/powerpc/kvm/emulate.c 		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
kvm               246 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm               247 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
kvm               270 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm               271 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
kvm               315 arch/powerpc/kvm/emulate_loadstore.c 			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm               316 arch/powerpc/kvm/emulate_loadstore.c 				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
kvm                 7 arch/powerpc/kvm/irq.h static inline int irqchip_in_kernel(struct kvm *kvm)
kvm                12 arch/powerpc/kvm/irq.h 	ret = ret || (kvm->arch.mpic != NULL);
kvm                15 arch/powerpc/kvm/irq.h 	ret = ret || (kvm->arch.xics != NULL);
kvm                16 arch/powerpc/kvm/irq.h 	ret = ret || (kvm->arch.xive != NULL);
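The powerpc irq.h lines above test each in-kernel irqchip pointer in turn; the gaps in the listed line numbers suggest config guards around them. A reconstruction under that assumption (the CONFIG_KVM_MPIC/CONFIG_KVM_XICS guards and the barrier comment are inferred, not taken from the listing):

    static inline int irqchip_in_kernel(struct kvm *kvm)
    {
            int ret = 0;

    #ifdef CONFIG_KVM_MPIC
            ret = ret || (kvm->arch.mpic != NULL);
    #endif
    #ifdef CONFIG_KVM_XICS
            ret = ret || (kvm->arch.xics != NULL);
            ret = ret || (kvm->arch.xive != NULL);
    #endif
            smp_rmb();      /* order the pointer reads vs. vcpu creation */
            return ret;
    }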
kvm               192 arch/powerpc/kvm/mpic.c 	struct kvm *kvm;
kvm              1107 arch/powerpc/kvm/mpic.c 		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
kvm              1450 arch/powerpc/kvm/mpic.c 	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
kvm              1457 arch/powerpc/kvm/mpic.c 	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
kvm              1476 arch/powerpc/kvm/mpic.c 	mutex_lock(&opp->kvm->slots_lock);
kvm              1490 arch/powerpc/kvm/mpic.c 	mutex_unlock(&opp->kvm->slots_lock);
kvm              1568 arch/powerpc/kvm/mpic.c 			mutex_lock(&opp->kvm->slots_lock);
kvm              1570 arch/powerpc/kvm/mpic.c 			mutex_unlock(&opp->kvm->slots_lock);
kvm              1636 arch/powerpc/kvm/mpic.c 	dev->kvm->arch.mpic = NULL;
kvm              1650 arch/powerpc/kvm/mpic.c 	kvm_set_irq_routing(opp->kvm, routing, 0, 0);
kvm              1662 arch/powerpc/kvm/mpic.c 	if (dev->kvm->arch.mpic)
kvm              1670 arch/powerpc/kvm/mpic.c 	opp->kvm = dev->kvm;
kvm              1715 arch/powerpc/kvm/mpic.c 	dev->kvm->arch.mpic = opp;
kvm              1741 arch/powerpc/kvm/mpic.c 	if (opp->kvm != vcpu->kvm)
kvm              1792 arch/powerpc/kvm/mpic.c 			struct kvm *kvm, int irq_source_id, int level,
kvm              1796 arch/powerpc/kvm/mpic.c 	struct openpic *opp = kvm->arch.mpic;
kvm              1808 arch/powerpc/kvm/mpic.c 		struct kvm *kvm, int irq_source_id, int level, bool line_status)
kvm              1810 arch/powerpc/kvm/mpic.c 	struct openpic *opp = kvm->arch.mpic;
kvm              1819 arch/powerpc/kvm/mpic.c 	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
kvm              1826 arch/powerpc/kvm/mpic.c int kvm_set_routing_entry(struct kvm *kvm,
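mpic.c registers its MMIO window on the KVM_MMIO_BUS and, as the entries above show, callers hold kvm->slots_lock around the map/unmap paths, which kvm_io_bus_register_dev() requires. A sketch; OPENPIC_REG_SIZE is assumed from the same file, and example_map_mmio is an illustrative name:

    static int example_map_mmio(struct kvm *kvm, gpa_t base,
                                struct kvm_io_device *dev)
    {
            int ret;

            mutex_lock(&kvm->slots_lock);   /* required by the io-bus API */
            ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base,
                                          OPENPIC_REG_SIZE, dev);
            mutex_unlock(&kvm->slots_lock);
            return ret;
    }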
kvm               265 arch/powerpc/kvm/powerpc.c 	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
kvm               332 arch/powerpc/kvm/powerpc.c 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
kvm               333 arch/powerpc/kvm/powerpc.c 		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
kvm               359 arch/powerpc/kvm/powerpc.c 	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
kvm               375 arch/powerpc/kvm/powerpc.c 	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
kvm               376 arch/powerpc/kvm/powerpc.c 		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
kvm               405 arch/powerpc/kvm/powerpc.c 	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
kvm               427 arch/powerpc/kvm/powerpc.c int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm               454 arch/powerpc/kvm/powerpc.c 	kvm->arch.kvm_ops = kvm_ops;
kvm               455 arch/powerpc/kvm/powerpc.c 	return kvmppc_core_init_vm(kvm);
kvm               460 arch/powerpc/kvm/powerpc.c void kvm_arch_destroy_vm(struct kvm *kvm)
kvm               471 arch/powerpc/kvm/powerpc.c 	if (is_kvmppc_hv_enabled(kvm))
kvm               475 arch/powerpc/kvm/powerpc.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               478 arch/powerpc/kvm/powerpc.c 	mutex_lock(&kvm->lock);
kvm               479 arch/powerpc/kvm/powerpc.c 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm               480 arch/powerpc/kvm/powerpc.c 		kvm->vcpus[i] = NULL;
kvm               482 arch/powerpc/kvm/powerpc.c 	atomic_set(&kvm->online_vcpus, 0);
kvm               484 arch/powerpc/kvm/powerpc.c 	kvmppc_core_destroy_vm(kvm);
kvm               486 arch/powerpc/kvm/powerpc.c 	mutex_unlock(&kvm->lock);
kvm               489 arch/powerpc/kvm/powerpc.c 	module_put(kvm->arch.kvm_ops->owner);
kvm               492 arch/powerpc/kvm/powerpc.c int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm               498 arch/powerpc/kvm/powerpc.c 	if (kvm) {
kvm               503 arch/powerpc/kvm/powerpc.c 		hv_enabled = is_kvmppc_hv_enabled(kvm);
kvm               576 arch/powerpc/kvm/powerpc.c 		if (kvm) {
kvm               577 arch/powerpc/kvm/powerpc.c 			if (kvm->arch.emul_smt_mode > 1)
kvm               578 arch/powerpc/kvm/powerpc.c 				r = kvm->arch.emul_smt_mode;
kvm               580 arch/powerpc/kvm/powerpc.c 				r = kvm->arch.smt_mode;
kvm               684 arch/powerpc/kvm/powerpc.c void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm               687 arch/powerpc/kvm/powerpc.c 	kvmppc_core_free_memslot(kvm, free, dont);
kvm               690 arch/powerpc/kvm/powerpc.c int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm               693 arch/powerpc/kvm/powerpc.c 	return kvmppc_core_create_memslot(kvm, slot, npages);
kvm               696 arch/powerpc/kvm/powerpc.c int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm               701 arch/powerpc/kvm/powerpc.c 	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
kvm               704 arch/powerpc/kvm/powerpc.c void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm               710 arch/powerpc/kvm/powerpc.c 	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
kvm               713 arch/powerpc/kvm/powerpc.c void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm               716 arch/powerpc/kvm/powerpc.c 	kvmppc_core_flush_memslot(kvm, slot);
kvm               719 arch/powerpc/kvm/powerpc.c struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm               722 arch/powerpc/kvm/powerpc.c 	vcpu = kvmppc_core_vcpu_create(kvm, id);
kvm               840 arch/powerpc/kvm/powerpc.c 	struct kvm *kvm = irqfd->kvm;
kvm               842 arch/powerpc/kvm/powerpc.c 	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
kvm               843 arch/powerpc/kvm/powerpc.c 		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
kvm               853 arch/powerpc/kvm/powerpc.c 	struct kvm *kvm = irqfd->kvm;
kvm               855 arch/powerpc/kvm/powerpc.c 	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
kvm               856 arch/powerpc/kvm/powerpc.c 		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
kvm              1161 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm              1162 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
kvm              1177 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm              1178 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
kvm              1194 arch/powerpc/kvm/powerpc.c 		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
kvm              1195 arch/powerpc/kvm/powerpc.c 			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
kvm              1251 arch/powerpc/kvm/powerpc.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1256 arch/powerpc/kvm/powerpc.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1355 arch/powerpc/kvm/powerpc.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1360 arch/powerpc/kvm/powerpc.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              1969 arch/powerpc/kvm/powerpc.c 		if (!is_kvmppc_hv_enabled(vcpu->kvm))
kvm              1972 arch/powerpc/kvm/powerpc.c 		vcpu->kvm->arch.fwnmi_enabled = true;
kvm              1986 arch/powerpc/kvm/powerpc.c bool kvm_arch_intc_initialized(struct kvm *kvm)
kvm              1989 arch/powerpc/kvm/powerpc.c 	if (kvm->arch.mpic)
kvm              1993 arch/powerpc/kvm/powerpc.c 	if (kvm->arch.xics || kvm->arch.xive)
kvm              2120 arch/powerpc/kvm/powerpc.c int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
kvm              2123 arch/powerpc/kvm/powerpc.c 	if (!irqchip_in_kernel(kvm))
kvm              2126 arch/powerpc/kvm/powerpc.c 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
kvm              2133 arch/powerpc/kvm/powerpc.c int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm              2150 arch/powerpc/kvm/powerpc.c 		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
kvm              2153 arch/powerpc/kvm/powerpc.c 			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
kvm              2155 arch/powerpc/kvm/powerpc.c 			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
kvm              2164 arch/powerpc/kvm/powerpc.c 		if (kvm->arch.kvm_ops->set_smt_mode)
kvm              2165 arch/powerpc/kvm/powerpc.c 			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
kvm              2171 arch/powerpc/kvm/powerpc.c 		if (!is_kvmppc_hv_enabled(kvm) ||
kvm              2172 arch/powerpc/kvm/powerpc.c 		    !kvm->arch.kvm_ops->enable_nested)
kvm              2174 arch/powerpc/kvm/powerpc.c 		r = kvm->arch.kvm_ops->enable_nested(kvm);
kvm              2316 arch/powerpc/kvm/powerpc.c 	struct kvm *kvm __maybe_unused = filp->private_data;
kvm              2343 arch/powerpc/kvm/powerpc.c 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
kvm              2360 arch/powerpc/kvm/powerpc.c 		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
kvm              2367 arch/powerpc/kvm/powerpc.c 		struct kvm *kvm = filp->private_data;
kvm              2370 arch/powerpc/kvm/powerpc.c 		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
kvm              2376 arch/powerpc/kvm/powerpc.c 		struct kvm *kvm = filp->private_data;
kvm              2378 arch/powerpc/kvm/powerpc.c 		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
kvm              2382 arch/powerpc/kvm/powerpc.c 		struct kvm *kvm = filp->private_data;
kvm              2386 arch/powerpc/kvm/powerpc.c 		if (!kvm->arch.kvm_ops->configure_mmu)
kvm              2391 arch/powerpc/kvm/powerpc.c 		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
kvm              2395 arch/powerpc/kvm/powerpc.c 		struct kvm *kvm = filp->private_data;
kvm              2399 arch/powerpc/kvm/powerpc.c 		if (!kvm->arch.kvm_ops->get_rmmu_info)
kvm              2401 arch/powerpc/kvm/powerpc.c 		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
kvm              2415 arch/powerpc/kvm/powerpc.c 		struct kvm *kvm = filp->private_data;
kvm              2416 arch/powerpc/kvm/powerpc.c 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
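A pattern worth pulling out of the powerpc.c VM-ioctl entries above: ops that only one flavour implements (configure_mmu, get_rmmu_info, ...) are NULL-checked before use. Sketched as a standalone helper; the error codes follow the listed KVM_PPC_CONFIGURE_V3_MMU handling, and example_configure_mmu is an illustrative name:

    static long example_configure_mmu(struct file *filp, void __user *argp)
    {
            struct kvm *kvm = filp->private_data;
            struct kvm_ppc_mmuv3_cfg cfg;

            if (!kvm->arch.kvm_ops->configure_mmu)
                    return -EINVAL;         /* not offered by this flavour */
            if (copy_from_user(&cfg, argp, sizeof(cfg)))
                    return -EFAULT;
            return kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
    }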
kvm                 8 arch/powerpc/kvm/trace.h #define TRACE_SYSTEM kvm
kvm               817 arch/s390/include/asm/kvm_host.h 	struct kvm *kvm;				/* 0x0920 */
kvm               904 arch/s390/include/asm/kvm_host.h void kvm_arch_crypto_clear_masks(struct kvm *kvm);
kvm               905 arch/s390/include/asm/kvm_host.h void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
kvm               911 arch/s390/include/asm/kvm_host.h extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
kvm               912 arch/s390/include/asm/kvm_host.h extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
kvm               915 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_sync_events(struct kvm *kvm) {}
kvm               918 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_free_memslot(struct kvm *kvm,
kvm               920 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
kvm               921 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
kvm               922 arch/s390/include/asm/kvm_host.h static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm               106 arch/s390/kvm/diag.c 		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
kvm               166 arch/s390/kvm/diag.c 	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
kvm               190 arch/s390/kvm/diag.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm               208 arch/s390/kvm/diag.c 	if (!vcpu->kvm->arch.css_support ||
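diag.c resolves a target vcpu by id before acting on it, as in the diag 0x9c directed-yield path above. A hedged sketch; the self-yield check and the unconditional success return are simplifications, and example_diag_9c is an illustrative name:

    static int example_diag_9c(struct kvm_vcpu *vcpu, u16 tid)
    {
            struct kvm_vcpu *tcpu;

            if (tid == vcpu->vcpu_id)   /* yielding to self: nothing to do */
                    return 0;

            tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
            if (tcpu)
                    kvm_vcpu_yield_to(tcpu);
            return 0;
    }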
kvm               269 arch/s390/kvm/gaccess.c 		read_lock(&vcpu->kvm->arch.sca_lock);
kvm               270 arch/s390/kvm/gaccess.c 		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
kvm               271 arch/s390/kvm/gaccess.c 		read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               274 arch/s390/kvm/gaccess.c 	return vcpu->kvm->arch.ipte_lock_count != 0;
kvm               281 arch/s390/kvm/gaccess.c 	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
kvm               282 arch/s390/kvm/gaccess.c 	vcpu->kvm->arch.ipte_lock_count++;
kvm               283 arch/s390/kvm/gaccess.c 	if (vcpu->kvm->arch.ipte_lock_count > 1)
kvm               286 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm               287 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
kvm               291 arch/s390/kvm/gaccess.c 			read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               298 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               300 arch/s390/kvm/gaccess.c 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
kvm               307 arch/s390/kvm/gaccess.c 	mutex_lock(&vcpu->kvm->arch.ipte_mutex);
kvm               308 arch/s390/kvm/gaccess.c 	vcpu->kvm->arch.ipte_lock_count--;
kvm               309 arch/s390/kvm/gaccess.c 	if (vcpu->kvm->arch.ipte_lock_count)
kvm               311 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm               312 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
kvm               318 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               319 arch/s390/kvm/gaccess.c 	wake_up(&vcpu->kvm->arch.ipte_wq);
kvm               321 arch/s390/kvm/gaccess.c 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
kvm               329 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm               330 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
kvm               334 arch/s390/kvm/gaccess.c 			read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               342 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               349 arch/s390/kvm/gaccess.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm               350 arch/s390/kvm/gaccess.c 	ic = kvm_s390_get_ipte_control(vcpu->kvm);
kvm               358 arch/s390/kvm/gaccess.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               360 arch/s390/kvm/gaccess.c 		wake_up(&vcpu->kvm->arch.ipte_wq);
kvm               588 arch/s390/kvm/gaccess.c static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
kvm               590 arch/s390/kvm/gaccess.c 	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
kvm               628 arch/s390/kvm/gaccess.c 	edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
kvm               629 arch/s390/kvm/gaccess.c 	edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
kvm               630 arch/s390/kvm/gaccess.c 	iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
kvm               666 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
kvm               668 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rfte.val))
kvm               684 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
kvm               686 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rste.val))
kvm               702 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
kvm               704 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &rtte.val))
kvm               730 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, ptr))
kvm               732 arch/s390/kvm/gaccess.c 		if (deref_table(vcpu->kvm, ptr, &ste.val))
kvm               750 arch/s390/kvm/gaccess.c 	if (kvm_is_error_gpa(vcpu->kvm, ptr))
kvm               752 arch/s390/kvm/gaccess.c 	if (deref_table(vcpu->kvm, ptr, &pte.val))
kvm               772 arch/s390/kvm/gaccess.c 	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
kvm               818 arch/s390/kvm/gaccess.c 			if (kvm_is_error_gpa(vcpu->kvm, *pages))
kvm               861 arch/s390/kvm/gaccess.c 			rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
kvm               863 arch/s390/kvm/gaccess.c 			rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
kvm               928 arch/s390/kvm/gaccess.c 		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
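The gaccess.c ipte_lock/ipte_unlock entries above show a nesting count kept under kvm->arch.ipte_mutex, with only the outermost caller touching the real IPTE interlock in the SCA. A simplified shell of the lock side; the cmpxchg on the SCA's kh bit under kvm->arch.sca_lock is deliberately elided, and example_ipte_lock is an illustrative name:

    static void example_ipte_lock(struct kvm_vcpu *vcpu)
    {
            mutex_lock(&vcpu->kvm->arch.ipte_mutex);
            vcpu->kvm->arch.ipte_lock_count++;
            if (vcpu->kvm->arch.ipte_lock_count == 1) {
                    /* first holder: acquire the SCA interlock here
                     * (cmpxchg loop under sca_lock, omitted) */
            }
            mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
    }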
kvm               100 arch/s390/kvm/gaccess.h 	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
kvm               126 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
kvm               152 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
kvm               285 arch/s390/kvm/gaccess.h 	return kvm_write_guest(vcpu->kvm, gpa, data, len);
kvm               308 arch/s390/kvm/gaccess.h 	return kvm_read_guest(vcpu->kvm, gpa, data, len);
kvm                82 arch/s390/kvm/intercept.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm                94 arch/s390/kvm/intercept.c 		  current->pid, vcpu->kvm);
kvm               367 arch/s390/kvm/intercept.c 	if (!test_kvm_facility(vcpu->kvm, 74))
kvm               423 arch/s390/kvm/intercept.c 	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
kvm               451 arch/s390/kvm/intercept.c 	if (kvm_is_ucontrol(vcpu->kvm))
kvm                50 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm                51 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
kvm                52 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
kvm                59 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm                66 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm                79 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm                80 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
kvm                81 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
kvm                93 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm               105 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               122 arch/s390/kvm/interrupt.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm               123 arch/s390/kvm/interrupt.c 	if (vcpu->kvm->arch.use_esca) {
kvm               124 arch/s390/kvm/interrupt.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
kvm               132 arch/s390/kvm/interrupt.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm               140 arch/s390/kvm/interrupt.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm               179 arch/s390/kvm/interrupt.c 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
kvm               327 arch/s390/kvm/interrupt.c 	return vcpu->kvm->arch.float_int.pending_irqs |
kvm               333 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
kvm               395 arch/s390/kvm/interrupt.c 	   (vcpu->kvm->arch.float_int.mchk.cr14 |
kvm               411 arch/s390/kvm/interrupt.c 	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
kvm               417 arch/s390/kvm/interrupt.c 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
kvm               568 arch/s390/kvm/interrupt.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
kvm               588 arch/s390/kvm/interrupt.c 	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
kvm               595 arch/s390/kvm/interrupt.c 	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
kvm               647 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
kvm               907 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
kvm               941 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
kvm               983 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
kvm              1049 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
kvm              1055 arch/s390/kvm/interrupt.c 	fi = &vcpu->kvm->arch.float_int;
kvm              1150 arch/s390/kvm/interrupt.c 	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
kvm              1182 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
kvm              1216 arch/s390/kvm/interrupt.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              1219 arch/s390/kvm/interrupt.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1421 arch/s390/kvm/interrupt.c 	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
kvm              1505 arch/s390/kvm/interrupt.c 	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
kvm              1574 arch/s390/kvm/interrupt.c static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
kvm              1577 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              1600 arch/s390/kvm/interrupt.c static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
kvm              1608 arch/s390/kvm/interrupt.c 			inti = get_io_int(kvm, isc, schid);
kvm              1613 arch/s390/kvm/interrupt.c static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
kvm              1615 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              1647 arch/s390/kvm/interrupt.c struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
kvm              1650 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              1654 arch/s390/kvm/interrupt.c 	inti = get_top_io_int(kvm, isc_mask, schid);
kvm              1656 arch/s390/kvm/interrupt.c 	isc = get_top_gisa_isc(kvm, isc_mask, schid);
kvm              1677 arch/s390/kvm/interrupt.c 			kvm_s390_reinject_io_int(kvm, inti);
kvm              1688 arch/s390/kvm/interrupt.c static int __inject_service(struct kvm *kvm,
kvm              1691 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              1693 arch/s390/kvm/interrupt.c 	kvm->stat.inject_service_signal++;
kvm              1714 arch/s390/kvm/interrupt.c static int __inject_virtio(struct kvm *kvm,
kvm              1717 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              1719 arch/s390/kvm/interrupt.c 	kvm->stat.inject_virtio++;
kvm              1732 arch/s390/kvm/interrupt.c static int __inject_pfault_done(struct kvm *kvm,
kvm              1735 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              1737 arch/s390/kvm/interrupt.c 	kvm->stat.inject_pfault_done++;
kvm              1752 arch/s390/kvm/interrupt.c static int __inject_float_mchk(struct kvm *kvm,
kvm              1755 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              1757 arch/s390/kvm/interrupt.c 	kvm->stat.inject_float_mchk++;
kvm              1767 arch/s390/kvm/interrupt.c static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm              1769 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              1774 arch/s390/kvm/interrupt.c 	kvm->stat.inject_io++;
kvm              1778 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
kvm              1784 arch/s390/kvm/interrupt.c 	fi = &kvm->arch.float_int;
kvm              1793 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
kvm              1795 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
kvm              1809 arch/s390/kvm/interrupt.c static void __floating_irq_kick(struct kvm *kvm, u64 type)
kvm              1814 arch/s390/kvm/interrupt.c 	online_vcpus = atomic_read(&kvm->online_vcpus);
kvm              1819 arch/s390/kvm/interrupt.c 	sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
kvm              1822 arch/s390/kvm/interrupt.c 			sigcpu = kvm->arch.float_int.next_rr_cpu++;
kvm              1823 arch/s390/kvm/interrupt.c 			kvm->arch.float_int.next_rr_cpu %= online_vcpus;
kvm              1827 arch/s390/kvm/interrupt.c 		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
kvm              1829 arch/s390/kvm/interrupt.c 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
kvm              1838 arch/s390/kvm/interrupt.c 		      kvm->arch.gisa_int.origin))
kvm              1848 arch/s390/kvm/interrupt.c static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
kvm              1855 arch/s390/kvm/interrupt.c 		rc = __inject_float_mchk(kvm, inti);
kvm              1858 arch/s390/kvm/interrupt.c 		rc = __inject_virtio(kvm, inti);
kvm              1861 arch/s390/kvm/interrupt.c 		rc = __inject_service(kvm, inti);
kvm              1864 arch/s390/kvm/interrupt.c 		rc = __inject_pfault_done(kvm, inti);
kvm              1867 arch/s390/kvm/interrupt.c 		rc = __inject_io(kvm, inti);
kvm              1875 arch/s390/kvm/interrupt.c 	__floating_irq_kick(kvm, type);
kvm              1879 arch/s390/kvm/interrupt.c int kvm_s390_inject_vm(struct kvm *kvm,
kvm              1892 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
kvm              1898 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
kvm              1905 arch/s390/kvm/interrupt.c 		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
kvm              1923 arch/s390/kvm/interrupt.c 	rc = __inject_vm(kvm, inti);
kvm              1929 arch/s390/kvm/interrupt.c int kvm_s390_reinject_io_int(struct kvm *kvm,
kvm              1932 arch/s390/kvm/interrupt.c 	return __inject_vm(kvm, inti);
kvm              2079 arch/s390/kvm/interrupt.c void kvm_s390_clear_float_irqs(struct kvm *kvm)
kvm              2081 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              2093 arch/s390/kvm/interrupt.c 	kvm_s390_gisa_clear(kvm);
kvm              2096 arch/s390/kvm/interrupt.c static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
kvm              2098 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              2137 arch/s390/kvm/interrupt.c 	fi = &kvm->arch.float_int;
kvm              2185 arch/s390/kvm/interrupt.c static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              2187 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              2193 arch/s390/kvm/interrupt.c 	if (!test_kvm_facility(kvm, 72))
kvm              2213 arch/s390/kvm/interrupt.c 		r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
kvm              2217 arch/s390/kvm/interrupt.c 		r = flic_ais_mode_get_all(dev->kvm, attr);
kvm              2288 arch/s390/kvm/interrupt.c 		r = __inject_vm(dev->kvm, inti);
kvm              2300 arch/s390/kvm/interrupt.c static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
kvm              2305 arch/s390/kvm/interrupt.c 	return kvm->arch.adapters[id];
kvm              2324 arch/s390/kvm/interrupt.c 	if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
kvm              2341 arch/s390/kvm/interrupt.c 	dev->kvm->arch.adapters[adapter->id] = adapter;
kvm              2346 arch/s390/kvm/interrupt.c int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
kvm              2349 arch/s390/kvm/interrupt.c 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
kvm              2358 arch/s390/kvm/interrupt.c static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
kvm              2360 arch/s390/kvm/interrupt.c 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
kvm              2374 arch/s390/kvm/interrupt.c 	map->addr = gmap_translate(kvm->arch.gmap, addr);
kvm              2398 arch/s390/kvm/interrupt.c static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
kvm              2400 arch/s390/kvm/interrupt.c 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
kvm              2423 arch/s390/kvm/interrupt.c void kvm_s390_destroy_adapters(struct kvm *kvm)
kvm              2429 arch/s390/kvm/interrupt.c 		if (!kvm->arch.adapters[i])
kvm              2432 arch/s390/kvm/interrupt.c 					 &kvm->arch.adapters[i]->maps, list) {
kvm              2437 arch/s390/kvm/interrupt.c 		kfree(kvm->arch.adapters[i]);
kvm              2451 arch/s390/kvm/interrupt.c 	adapter = get_io_adapter(dev->kvm, req.id);
kvm              2456 arch/s390/kvm/interrupt.c 		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
kvm              2461 arch/s390/kvm/interrupt.c 		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
kvm              2464 arch/s390/kvm/interrupt.c 		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
kvm              2473 arch/s390/kvm/interrupt.c static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              2487 arch/s390/kvm/interrupt.c 	kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
kvm              2496 arch/s390/kvm/interrupt.c static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              2498 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              2502 arch/s390/kvm/interrupt.c 	if (!test_kvm_facility(kvm, 72))
kvm              2535 arch/s390/kvm/interrupt.c static int kvm_s390_inject_airq(struct kvm *kvm,
kvm              2538 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              2546 arch/s390/kvm/interrupt.c 	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
kvm              2547 arch/s390/kvm/interrupt.c 		return kvm_s390_inject_vm(kvm, &s390int);
kvm              2555 arch/s390/kvm/interrupt.c 	ret = kvm_s390_inject_vm(kvm, &s390int);
kvm              2566 arch/s390/kvm/interrupt.c static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              2569 arch/s390/kvm/interrupt.c 	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
kvm              2574 arch/s390/kvm/interrupt.c 	return kvm_s390_inject_airq(kvm, adapter);
kvm              2577 arch/s390/kvm/interrupt.c static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              2579 arch/s390/kvm/interrupt.c 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
kvm              2582 arch/s390/kvm/interrupt.c 	if (!test_kvm_facility(kvm, 72))
kvm              2607 arch/s390/kvm/interrupt.c 		kvm_s390_clear_float_irqs(dev->kvm);
kvm              2610 arch/s390/kvm/interrupt.c 		dev->kvm->arch.gmap->pfault_enabled = 1;
kvm              2613 arch/s390/kvm/interrupt.c 		dev->kvm->arch.gmap->pfault_enabled = 0;
kvm              2619 arch/s390/kvm/interrupt.c 		synchronize_srcu(&dev->kvm->srcu);
kvm              2620 arch/s390/kvm/interrupt.c 		kvm_for_each_vcpu(i, vcpu, dev->kvm)
kvm              2630 arch/s390/kvm/interrupt.c 		r = clear_io_irq(dev->kvm, attr);
kvm              2633 arch/s390/kvm/interrupt.c 		r = modify_ais_mode(dev->kvm, attr);
kvm              2636 arch/s390/kvm/interrupt.c 		r = flic_inject_airq(dev->kvm, attr);
kvm              2639 arch/s390/kvm/interrupt.c 		r = flic_ais_mode_set_all(dev->kvm, attr);
kvm              2672 arch/s390/kvm/interrupt.c 	if (dev->kvm->arch.flic)
kvm              2674 arch/s390/kvm/interrupt.c 	dev->kvm->arch.flic = dev;
kvm              2680 arch/s390/kvm/interrupt.c 	dev->kvm->arch.flic = NULL;
kvm              2718 arch/s390/kvm/interrupt.c static int adapter_indicators_set(struct kvm *kvm,
kvm              2733 arch/s390/kvm/interrupt.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              2734 arch/s390/kvm/interrupt.c 	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
kvm              2738 arch/s390/kvm/interrupt.c 		srcu_read_unlock(&kvm->srcu, idx);
kvm              2745 arch/s390/kvm/interrupt.c 	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
kvm              2747 arch/s390/kvm/interrupt.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              2757 arch/s390/kvm/interrupt.c 			   struct kvm *kvm, int irq_source_id, int level,
kvm              2766 arch/s390/kvm/interrupt.c 	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
kvm              2770 arch/s390/kvm/interrupt.c 	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
kvm              2773 arch/s390/kvm/interrupt.c 		ret = kvm_s390_inject_airq(kvm, adapter);
kvm              2809 arch/s390/kvm/interrupt.c 		rc = __inject_vm(vcpu->kvm, &inti);
kvm              2818 arch/s390/kvm/interrupt.c int kvm_set_routing_entry(struct kvm *kvm,
kvm              2841 arch/s390/kvm/interrupt.c int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
kvm              2985 arch/s390/kvm/interrupt.c static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
kvm              2987 arch/s390/kvm/interrupt.c 	int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
kvm              2988 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              2991 arch/s390/kvm/interrupt.c 	for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
kvm              2992 arch/s390/kvm/interrupt.c 		vcpu = kvm_get_vcpu(kvm, vcpu_id);
kvm              3010 arch/s390/kvm/interrupt.c 	struct kvm *kvm =
kvm              3011 arch/s390/kvm/interrupt.c 		container_of(gi->origin, struct sie_page2, gisa)->kvm;
kvm              3016 arch/s390/kvm/interrupt.c 		__airqs_kick_single_vcpu(kvm, pending_mask);
kvm              3032 arch/s390/kvm/interrupt.c 	struct kvm *kvm;
kvm              3062 arch/s390/kvm/interrupt.c 			kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
kvm              3063 arch/s390/kvm/interrupt.c 			gi = &kvm->arch.gisa_int;
kvm              3072 arch/s390/kvm/interrupt.c void kvm_s390_gisa_clear(struct kvm *kvm)
kvm              3074 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              3079 arch/s390/kvm/interrupt.c 	VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
kvm              3082 arch/s390/kvm/interrupt.c void kvm_s390_gisa_init(struct kvm *kvm)
kvm              3084 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              3088 arch/s390/kvm/interrupt.c 	gi->origin = &kvm->arch.sie_page2->gisa;
kvm              3096 arch/s390/kvm/interrupt.c 	VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
kvm              3099 arch/s390/kvm/interrupt.c void kvm_s390_gisa_destroy(struct kvm *kvm)
kvm              3101 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              3107 arch/s390/kvm/interrupt.c 			  kvm, gi->alert.mask);
kvm              3131 arch/s390/kvm/interrupt.c int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
kvm              3133 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
kvm              3170 arch/s390/kvm/interrupt.c int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
kvm              3172 arch/s390/kvm/interrupt.c 	struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
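__floating_irq_kick() in the interrupt.c entries above picks a delivery target by preferring an idle vcpu and falling back to a round-robin over the online ones. A sketch of just the selection step; the fi->lock serialization around next_rr_cpu is elided, and example_pick_target is an illustrative name:

    static struct kvm_vcpu *example_pick_target(struct kvm *kvm)
    {
            int sigcpu, online_vcpus = atomic_read(&kvm->online_vcpus);

            if (!online_vcpus)
                    return NULL;

            /* a vcpu already sitting in wait is the cheapest target */
            sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
            if (sigcpu == online_vcpus) {
                    /* none idle: rotate through the running ones */
                    do {
                            sigcpu = kvm->arch.float_int.next_rr_cpu++;
                            kvm->arch.float_int.next_rr_cpu %= online_vcpus;
                    } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
            }
            return kvm_get_vcpu(kvm, sigcpu);
    }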
kvm                14 arch/s390/kvm/irq.h static inline int irqchip_in_kernel(struct kvm *kvm)
kvm                60 arch/s390/kvm/kvm-s390.c #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
kvm               269 arch/s390/kvm/kvm-s390.c 	struct kvm *kvm;
kvm               274 arch/s390/kvm/kvm-s390.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm               275 arch/s390/kvm/kvm-s390.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               278 arch/s390/kvm/kvm-s390.c 				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
kvm               279 arch/s390/kvm/kvm-s390.c 				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
kvm               504 arch/s390/kvm/kvm-s390.c int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm               539 arch/s390/kvm/kvm-s390.c 		if (hpage && !kvm_is_ucontrol(kvm))
kvm               575 arch/s390/kvm/kvm-s390.c static void kvm_s390_sync_dirty_log(struct kvm *kvm,
kvm               581 arch/s390/kvm/kvm-s390.c 	struct gmap *gmap = kvm->arch.gmap;
kvm               597 arch/s390/kvm/kvm-s390.c 				mark_page_dirty(kvm, cur_gfn + i);
kvm               612 arch/s390/kvm/kvm-s390.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm               621 arch/s390/kvm/kvm-s390.c 	if (kvm_is_ucontrol(kvm))
kvm               624 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->slots_lock);
kvm               630 arch/s390/kvm/kvm-s390.c 	slots = kvm_memslots(kvm);
kvm               636 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_dirty_log(kvm, memslot);
kvm               637 arch/s390/kvm/kvm-s390.c 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
kvm               648 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->slots_lock);
kvm               652 arch/s390/kvm/kvm-s390.c static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
kvm               657 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               662 arch/s390/kvm/kvm-s390.c int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm               671 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
kvm               672 arch/s390/kvm/kvm-s390.c 		kvm->arch.use_irqchip = 1;
kvm               676 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
kvm               677 arch/s390/kvm/kvm-s390.c 		kvm->arch.user_sigp = 1;
kvm               681 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               682 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus) {
kvm               685 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_mask, 129);
kvm               686 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_list, 129);
kvm               688 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_mask, 134);
kvm               689 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_list, 134);
kvm               692 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_mask, 135);
kvm               693 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_list, 135);
kvm               696 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_mask, 148);
kvm               697 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_list, 148);
kvm               700 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_mask, 152);
kvm               701 arch/s390/kvm/kvm-s390.c 				set_kvm_facility(kvm->arch.model.fac_list, 152);
kvm               706 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               707 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
kvm               712 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               713 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus) {
kvm               716 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_mask, 64);
kvm               717 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_list, 64);
kvm               720 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               721 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
kvm               725 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               726 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus) {
kvm               729 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_mask, 72);
kvm               730 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_list, 72);
kvm               733 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               734 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
kvm               739 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               740 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus) {
kvm               743 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_mask, 133);
kvm               744 arch/s390/kvm/kvm-s390.c 			set_kvm_facility(kvm->arch.model.fac_list, 133);
kvm               747 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               748 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
kvm               752 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               753 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus)
kvm               755 arch/s390/kvm/kvm-s390.c 		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
kvm               759 arch/s390/kvm/kvm-s390.c 			down_write(&kvm->mm->mmap_sem);
kvm               760 arch/s390/kvm/kvm-s390.c 			kvm->mm->context.allow_gmap_hpage_1m = 1;
kvm               761 arch/s390/kvm/kvm-s390.c 			up_write(&kvm->mm->mmap_sem);
kvm               767 arch/s390/kvm/kvm-s390.c 			kvm->arch.use_skf = 0;
kvm               768 arch/s390/kvm/kvm-s390.c 			kvm->arch.use_pfmfi = 0;
kvm               770 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               771 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
kvm               775 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm               776 arch/s390/kvm/kvm-s390.c 		kvm->arch.user_stsi = 1;
kvm               780 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
kvm               781 arch/s390/kvm/kvm-s390.c 		kvm->arch.user_instr0 = 1;
kvm               782 arch/s390/kvm/kvm-s390.c 		icpt_operexc_on_all_vcpus(kvm);
kvm               792 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
kvm               799 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
kvm               800 arch/s390/kvm/kvm-s390.c 			 kvm->arch.mem_limit);
kvm               801 arch/s390/kvm/kvm-s390.c 		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
kvm               811 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
kvm               821 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
kvm               822 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               823 arch/s390/kvm/kvm-s390.c 		if (kvm->created_vcpus)
kvm               825 arch/s390/kvm/kvm-s390.c 		else if (kvm->mm->context.allow_gmap_hpage_1m)
kvm               828 arch/s390/kvm/kvm-s390.c 			kvm->arch.use_cmma = 1;
kvm               830 arch/s390/kvm/kvm-s390.c 			kvm->arch.use_pfmfi = 0;
kvm               833 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               840 arch/s390/kvm/kvm-s390.c 		if (!kvm->arch.use_cmma)
kvm               843 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
kvm               844 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               845 arch/s390/kvm/kvm-s390.c 		idx = srcu_read_lock(&kvm->srcu);
kvm               846 arch/s390/kvm/kvm-s390.c 		s390_reset_cmma(kvm->arch.gmap->mm);
kvm               847 arch/s390/kvm/kvm-s390.c 		srcu_read_unlock(&kvm->srcu, idx);
kvm               848 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               854 arch/s390/kvm/kvm-s390.c 		if (kvm_is_ucontrol(kvm))
kvm               860 arch/s390/kvm/kvm-s390.c 		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
kvm               861 arch/s390/kvm/kvm-s390.c 		    new_limit > kvm->arch.mem_limit)
kvm               872 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->lock);
kvm               873 arch/s390/kvm/kvm-s390.c 		if (!kvm->created_vcpus) {
kvm               880 arch/s390/kvm/kvm-s390.c 				gmap_remove(kvm->arch.gmap);
kvm               881 arch/s390/kvm/kvm-s390.c 				new->private = kvm;
kvm               882 arch/s390/kvm/kvm-s390.c 				kvm->arch.gmap = new;
kvm               886 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               887 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
kvm               888 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
kvm               889 arch/s390/kvm/kvm-s390.c 			 (void *) kvm->arch.gmap->asce);
kvm               901 arch/s390/kvm/kvm-s390.c void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm               906 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_block_all(kvm);
kvm               908 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               914 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_unblock_all(kvm);
kvm               917 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
kvm               919 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm               922 arch/s390/kvm/kvm-s390.c 		if (!test_kvm_facility(kvm, 76)) {
kvm               923 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               927 arch/s390/kvm/kvm-s390.c 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
kvm               928 arch/s390/kvm/kvm-s390.c 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
kvm               929 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.aes_kw = 1;
kvm               930 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
kvm               933 arch/s390/kvm/kvm-s390.c 		if (!test_kvm_facility(kvm, 76)) {
kvm               934 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               938 arch/s390/kvm/kvm-s390.c 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
kvm               939 arch/s390/kvm/kvm-s390.c 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
kvm               940 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.dea_kw = 1;
kvm               941 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
kvm               944 arch/s390/kvm/kvm-s390.c 		if (!test_kvm_facility(kvm, 76)) {
kvm               945 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               948 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.aes_kw = 0;
kvm               949 arch/s390/kvm/kvm-s390.c 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
kvm               950 arch/s390/kvm/kvm-s390.c 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
kvm               951 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
kvm               954 arch/s390/kvm/kvm-s390.c 		if (!test_kvm_facility(kvm, 76)) {
kvm               955 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               958 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.dea_kw = 0;
kvm               959 arch/s390/kvm/kvm-s390.c 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
kvm               960 arch/s390/kvm/kvm-s390.c 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
kvm               961 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
kvm               965 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               968 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.apie = 1;
kvm               972 arch/s390/kvm/kvm-s390.c 			mutex_unlock(&kvm->lock);
kvm               975 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.apie = 0;
kvm               978 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm               982 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_crypto_reset_all(kvm);
kvm               983 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm               987 arch/s390/kvm/kvm-s390.c static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
kvm               992 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(cx, vcpu, kvm)
kvm              1000 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_start_migration(struct kvm *kvm)
kvm              1008 arch/s390/kvm/kvm-s390.c 	if (kvm->arch.migration_mode)
kvm              1010 arch/s390/kvm/kvm-s390.c 	slots = kvm_memslots(kvm);
kvm              1014 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.use_cmma) {
kvm              1015 arch/s390/kvm/kvm-s390.c 		kvm->arch.migration_mode = 1;
kvm              1032 arch/s390/kvm/kvm-s390.c 	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
kvm              1033 arch/s390/kvm/kvm-s390.c 	kvm->arch.migration_mode = 1;
kvm              1034 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
kvm              1042 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_stop_migration(struct kvm *kvm)
kvm              1045 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.migration_mode)
kvm              1047 arch/s390/kvm/kvm-s390.c 	kvm->arch.migration_mode = 0;
kvm              1048 arch/s390/kvm/kvm-s390.c 	if (kvm->arch.use_cmma)
kvm              1049 arch/s390/kvm/kvm-s390.c 		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
kvm              1053 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_set_migration(struct kvm *kvm,
kvm              1058 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->slots_lock);
kvm              1061 arch/s390/kvm/kvm-s390.c 		res = kvm_s390_vm_start_migration(kvm);
kvm              1064 arch/s390/kvm/kvm-s390.c 		res = kvm_s390_vm_stop_migration(kvm);
kvm              1069 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->slots_lock);
kvm              1074 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_get_migration(struct kvm *kvm,
kvm              1077 arch/s390/kvm/kvm-s390.c 	u64 mig = kvm->arch.migration_mode;
kvm              1087 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1094 arch/s390/kvm/kvm-s390.c 	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
kvm              1096 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_tod_clock(kvm, &gtod);
kvm              1098 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
kvm              1104 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1114 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
kvm              1119 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1127 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_tod_clock(kvm, &gtod);
kvm              1128 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
kvm              1132 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1141 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_tod_ext(kvm, attr);
kvm              1144 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_tod_high(kvm, attr);
kvm              1147 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_tod_low(kvm, attr);
kvm              1156 arch/s390/kvm/kvm-s390.c static void kvm_s390_get_tod_clock(struct kvm *kvm,
kvm              1165 arch/s390/kvm/kvm-s390.c 	gtod->tod = htod.tod + kvm->arch.epoch;
kvm              1167 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(kvm, 139)) {
kvm              1168 arch/s390/kvm/kvm-s390.c 		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
kvm              1176 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1181 arch/s390/kvm/kvm-s390.c 	kvm_s390_get_tod_clock(kvm, &gtod);
kvm              1185 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
kvm              1190 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1197 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
kvm              1202 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1206 arch/s390/kvm/kvm-s390.c 	gtod = kvm_s390_get_tod_clock_fast(kvm);
kvm              1209 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
kvm              1214 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1223 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_tod_ext(kvm, attr);
kvm              1226 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_tod_high(kvm, attr);
kvm              1229 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_tod_low(kvm, attr);
kvm              1238 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1244 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              1245 arch/s390/kvm/kvm-s390.c 	if (kvm->created_vcpus) {
kvm              1256 arch/s390/kvm/kvm-s390.c 		kvm->arch.model.cpuid = proc->cpuid;
kvm              1261 arch/s390/kvm/kvm-s390.c 				kvm->arch.model.ibc = unblocked_ibc;
kvm              1263 arch/s390/kvm/kvm-s390.c 				kvm->arch.model.ibc = lowest_ibc;
kvm              1265 arch/s390/kvm/kvm-s390.c 				kvm->arch.model.ibc = proc->ibc;
kvm              1267 arch/s390/kvm/kvm-s390.c 		memcpy(kvm->arch.model.fac_list, proc->fac_list,
kvm              1269 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
kvm              1270 arch/s390/kvm/kvm-s390.c 			 kvm->arch.model.ibc,
kvm              1271 arch/s390/kvm/kvm-s390.c 			 kvm->arch.model.cpuid);
kvm              1272 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
kvm              1273 arch/s390/kvm/kvm-s390.c 			 kvm->arch.model.fac_list[0],
kvm              1274 arch/s390/kvm/kvm-s390.c 			 kvm->arch.model.fac_list[1],
kvm              1275 arch/s390/kvm/kvm-s390.c 			 kvm->arch.model.fac_list[2]);
kvm              1280 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              1284 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_processor_feat(struct kvm *kvm,
kvm              1296 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              1297 arch/s390/kvm/kvm-s390.c 	if (kvm->created_vcpus) {
kvm              1298 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm              1301 arch/s390/kvm/kvm-s390.c 	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
kvm              1303 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              1304 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
kvm              1311 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
kvm              1314 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              1315 arch/s390/kvm/kvm-s390.c 	if (kvm->created_vcpus) {
kvm              1316 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm              1320 arch/s390/kvm/kvm-s390.c 	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
kvm              1322 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->lock);
kvm              1325 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              1327 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1328 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
kvm              1329 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
kvm              1330 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
kvm              1331 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
kvm              1332 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
kvm              1333 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
kvm              1334 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
kvm              1335 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
kvm              1336 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
kvm              1337 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
kvm              1338 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
kvm              1339 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
kvm              1340 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
kvm              1341 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
kvm              1342 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
kvm              1343 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
kvm              1344 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1345 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
kvm              1346 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
kvm              1347 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1348 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
kvm              1349 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
kvm              1350 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
kvm              1351 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
kvm              1352 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
kvm              1353 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
kvm              1354 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
kvm              1355 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
kvm              1356 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
kvm              1357 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
kvm              1358 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
kvm              1359 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
kvm              1360 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
kvm              1361 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
kvm              1362 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
kvm              1363 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
kvm              1364 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
kvm              1365 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
kvm              1366 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
kvm              1367 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
kvm              1368 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
kvm              1369 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
kvm              1370 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
kvm              1371 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
kvm              1372 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
kvm              1373 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
kvm              1374 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1375 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
kvm              1376 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
kvm              1377 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
kvm              1378 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
kvm              1379 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1380 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
kvm              1381 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
kvm              1382 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
kvm              1383 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
kvm              1388 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1394 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_processor(kvm, attr);
kvm              1397 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_processor_feat(kvm, attr);
kvm              1400 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_processor_subfunc(kvm, attr);
kvm              1406 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1416 arch/s390/kvm/kvm-s390.c 	proc->cpuid = kvm->arch.model.cpuid;
kvm              1417 arch/s390/kvm/kvm-s390.c 	proc->ibc = kvm->arch.model.ibc;
kvm              1418 arch/s390/kvm/kvm-s390.c 	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
kvm              1420 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
kvm              1421 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.ibc,
kvm              1422 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.cpuid);
kvm              1423 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
kvm              1424 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.fac_list[0],
kvm              1425 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.fac_list[1],
kvm              1426 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.fac_list[2]);
kvm              1434 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1446 arch/s390/kvm/kvm-s390.c 	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
kvm              1450 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host ibc:  0x%4.4x, host cpuid:  0x%16.16llx",
kvm              1451 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.ibc,
kvm              1452 arch/s390/kvm/kvm-s390.c 		 kvm->arch.model.cpuid);
kvm              1453 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
kvm              1457 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
kvm              1468 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_processor_feat(struct kvm *kvm,
kvm              1473 arch/s390/kvm/kvm-s390.c 	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
kvm              1477 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
kvm              1484 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_machine_feat(struct kvm *kvm,
kvm              1494 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host feat:  0x%16.16llx.0x%16.16llx.0x%16.16llx",
kvm              1501 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
kvm              1504 arch/s390/kvm/kvm-s390.c 	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
kvm              1508 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1509 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
kvm              1510 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
kvm              1511 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
kvm              1512 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
kvm              1513 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
kvm              1514 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
kvm              1515 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
kvm              1516 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
kvm              1517 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
kvm              1518 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
kvm              1519 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
kvm              1520 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
kvm              1521 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
kvm              1522 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
kvm              1523 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
kvm              1524 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
kvm              1525 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1526 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
kvm              1527 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
kvm              1528 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1529 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
kvm              1530 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
kvm              1531 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
kvm              1532 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
kvm              1533 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
kvm              1534 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
kvm              1535 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
kvm              1536 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
kvm              1537 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
kvm              1538 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
kvm              1539 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
kvm              1540 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
kvm              1541 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
kvm              1542 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
kvm              1543 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
kvm              1544 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
kvm              1545 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
kvm              1546 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
kvm              1547 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
kvm              1548 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
kvm              1549 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
kvm              1550 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
kvm              1551 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
kvm              1552 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
kvm              1553 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
kvm              1554 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
kvm              1555 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1556 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
kvm              1557 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
kvm              1558 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
kvm              1559 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
kvm              1560 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1561 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
kvm              1562 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
kvm              1563 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
kvm              1564 arch/s390/kvm/kvm-s390.c 		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
kvm              1569 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
kvm              1576 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1581 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  PTFF   subfunc 0x%16.16lx.%16.16lx",
kvm              1584 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMAC   subfunc 0x%16.16lx.%16.16lx",
kvm              1587 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMC    subfunc 0x%16.16lx.%16.16lx",
kvm              1590 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KM     subfunc 0x%16.16lx.%16.16lx",
kvm              1593 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KIMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1596 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KLMD   subfunc 0x%16.16lx.%16.16lx",
kvm              1599 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  PCKMO  subfunc 0x%16.16lx.%16.16lx",
kvm              1602 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMCTR  subfunc 0x%16.16lx.%16.16lx",
kvm              1605 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMF    subfunc 0x%16.16lx.%16.16lx",
kvm              1608 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMO    subfunc 0x%16.16lx.%16.16lx",
kvm              1611 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  PCC    subfunc 0x%16.16lx.%16.16lx",
kvm              1614 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  PPNO   subfunc 0x%16.16lx.%16.16lx",
kvm              1617 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KMA    subfunc 0x%16.16lx.%16.16lx",
kvm              1620 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  KDSA   subfunc 0x%16.16lx.%16.16lx",
kvm              1623 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1628 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "GET: host  DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
kvm              1637 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1643 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_processor(kvm, attr);
kvm              1646 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_machine(kvm, attr);
kvm              1649 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_processor_feat(kvm, attr);
kvm              1652 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_machine_feat(kvm, attr);
kvm              1655 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_processor_subfunc(kvm, attr);
kvm              1658 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_machine_subfunc(kvm, attr);
kvm              1664 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1670 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_mem_control(kvm, attr);
kvm              1673 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_tod(kvm, attr);
kvm              1676 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_set_cpu_model(kvm, attr);
kvm              1679 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_vm_set_crypto(kvm, attr);
kvm              1682 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_vm_set_migration(kvm, attr);
kvm              1692 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1698 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_mem_control(kvm, attr);
kvm              1701 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_tod(kvm, attr);
kvm              1704 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_cpu_model(kvm, attr);
kvm              1707 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_vm_get_migration(kvm, attr);
kvm              1717 arch/s390/kvm/kvm-s390.c static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
kvm              1790 arch/s390/kvm/kvm-s390.c static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
kvm              1812 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              1814 arch/s390/kvm/kvm-s390.c 		hva = gfn_to_hva(kvm, args->start_gfn + i);
kvm              1824 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1838 arch/s390/kvm/kvm-s390.c static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
kvm              1870 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              1873 arch/s390/kvm/kvm-s390.c 		hva = gfn_to_hva(kvm, args->start_gfn + i);
kvm              1895 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              1946 arch/s390/kvm/kvm-s390.c static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
kvm              1953 arch/s390/kvm/kvm-s390.c 		hva = gfn_to_hva(kvm, cur_gfn);
kvm              1960 arch/s390/kvm/kvm-s390.c 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
kvm              1994 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
kvm              1998 arch/s390/kvm/kvm-s390.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              2002 arch/s390/kvm/kvm-s390.c 	ms = gfn_to_memslot(kvm, cur_gfn);
kvm              2011 arch/s390/kvm/kvm-s390.c 		hva = gfn_to_hva(kvm, cur_gfn);
kvm              2016 arch/s390/kvm/kvm-s390.c 			atomic64_dec(&kvm->arch.cmma_dirty_pages);
kvm              2017 arch/s390/kvm/kvm-s390.c 		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
kvm              2034 arch/s390/kvm/kvm-s390.c 			ms = gfn_to_memslot(kvm, cur_gfn);
kvm              2050 arch/s390/kvm/kvm-s390.c static int kvm_s390_get_cmma_bits(struct kvm *kvm,
kvm              2057 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.use_cmma)
kvm              2064 arch/s390/kvm/kvm-s390.c 	if (!peek && !kvm->arch.migration_mode)
kvm              2068 arch/s390/kvm/kvm-s390.c 	if (!bufsize || !kvm->mm->context.uses_cmm) {
kvm              2073 arch/s390/kvm/kvm-s390.c 	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
kvm              2082 arch/s390/kvm/kvm-s390.c 	down_read(&kvm->mm->mmap_sem);
kvm              2083 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              2085 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
kvm              2087 arch/s390/kvm/kvm-s390.c 		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
kvm              2088 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              2089 arch/s390/kvm/kvm-s390.c 	up_read(&kvm->mm->mmap_sem);
kvm              2091 arch/s390/kvm/kvm-s390.c 	if (kvm->arch.migration_mode)
kvm              2092 arch/s390/kvm/kvm-s390.c 		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
kvm              2108 arch/s390/kvm/kvm-s390.c static int kvm_s390_set_cmma_bits(struct kvm *kvm,
kvm              2117 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.use_cmma)
kvm              2139 arch/s390/kvm/kvm-s390.c 	down_read(&kvm->mm->mmap_sem);
kvm              2140 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              2142 arch/s390/kvm/kvm-s390.c 		hva = gfn_to_hva(kvm, args->start_gfn + i);
kvm              2151 arch/s390/kvm/kvm-s390.c 		set_pgste_bits(kvm->mm, hva, mask, pgstev);
kvm              2153 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
kvm              2154 arch/s390/kvm/kvm-s390.c 	up_read(&kvm->mm->mmap_sem);
kvm              2156 arch/s390/kvm/kvm-s390.c 	if (!kvm->mm->context.uses_cmm) {
kvm              2157 arch/s390/kvm/kvm-s390.c 		down_write(&kvm->mm->mmap_sem);
kvm              2158 arch/s390/kvm/kvm-s390.c 		kvm->mm->context.uses_cmm = 1;
kvm              2159 arch/s390/kvm/kvm-s390.c 		up_write(&kvm->mm->mmap_sem);
kvm              2169 arch/s390/kvm/kvm-s390.c 	struct kvm *kvm = filp->private_data;
kvm              2181 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_inject_vm(kvm, &s390int);
kvm              2188 arch/s390/kvm/kvm-s390.c 		if (kvm->arch.use_irqchip) {
kvm              2191 arch/s390/kvm/kvm-s390.c 			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
kvm              2199 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_vm_set_attr(kvm, &attr);
kvm              2206 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_vm_get_attr(kvm, &attr);
kvm              2213 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_vm_has_attr(kvm, &attr);
kvm              2223 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_get_skeys(kvm, &args);
kvm              2233 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_set_skeys(kvm, &args);
kvm              2242 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->slots_lock);
kvm              2243 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_get_cmma_bits(kvm, &args);
kvm              2244 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->slots_lock);
kvm              2258 arch/s390/kvm/kvm-s390.c 		mutex_lock(&kvm->slots_lock);
kvm              2259 arch/s390/kvm/kvm-s390.c 		r = kvm_s390_set_cmma_bits(kvm, &args);
kvm              2260 arch/s390/kvm/kvm-s390.c 		mutex_unlock(&kvm->slots_lock);
kvm              2290 arch/s390/kvm/kvm-s390.c static void kvm_s390_set_crycb_format(struct kvm *kvm)
kvm              2292 arch/s390/kvm/kvm-s390.c 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
kvm              2295 arch/s390/kvm/kvm-s390.c 	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
kvm              2298 arch/s390/kvm/kvm-s390.c 	if (!test_kvm_facility(kvm, 76))
kvm              2302 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
kvm              2304 arch/s390/kvm/kvm-s390.c 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
kvm              2307 arch/s390/kvm/kvm-s390.c void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
kvm              2310 arch/s390/kvm/kvm-s390.c 	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
kvm              2312 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              2313 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_block_all(kvm);
kvm              2315 arch/s390/kvm/kvm-s390.c 	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
kvm              2318 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
kvm              2321 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
kvm              2324 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
kvm              2332 arch/s390/kvm/kvm-s390.c 		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
kvm              2341 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm              2342 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_unblock_all(kvm);
kvm              2343 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              2347 arch/s390/kvm/kvm-s390.c void kvm_arch_crypto_clear_masks(struct kvm *kvm)
kvm              2349 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              2350 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_block_all(kvm);
kvm              2352 arch/s390/kvm/kvm-s390.c 	memset(&kvm->arch.crypto.crycb->apcb0, 0,
kvm              2353 arch/s390/kvm/kvm-s390.c 	       sizeof(kvm->arch.crypto.crycb->apcb0));
kvm              2354 arch/s390/kvm/kvm-s390.c 	memset(&kvm->arch.crypto.crycb->apcb1, 0,
kvm              2355 arch/s390/kvm/kvm-s390.c 	       sizeof(kvm->arch.crypto.crycb->apcb1));
kvm              2357 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
kvm              2359 arch/s390/kvm/kvm-s390.c 	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm              2360 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_unblock_all(kvm);
kvm              2361 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              2374 arch/s390/kvm/kvm-s390.c static void kvm_s390_crypto_init(struct kvm *kvm)
kvm              2376 arch/s390/kvm/kvm-s390.c 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm              2377 arch/s390/kvm/kvm-s390.c 	kvm_s390_set_crycb_format(kvm);
kvm              2379 arch/s390/kvm/kvm-s390.c 	if (!test_kvm_facility(kvm, 76))
kvm              2383 arch/s390/kvm/kvm-s390.c 	kvm->arch.crypto.aes_kw = 1;
kvm              2384 arch/s390/kvm/kvm-s390.c 	kvm->arch.crypto.dea_kw = 1;
kvm              2385 arch/s390/kvm/kvm-s390.c 	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
kvm              2386 arch/s390/kvm/kvm-s390.c 			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
kvm              2387 arch/s390/kvm/kvm-s390.c 	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
kvm              2388 arch/s390/kvm/kvm-s390.c 			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
kvm              2391 arch/s390/kvm/kvm-s390.c static void sca_dispose(struct kvm *kvm)
kvm              2393 arch/s390/kvm/kvm-s390.c 	if (kvm->arch.use_esca)
kvm              2394 arch/s390/kvm/kvm-s390.c 		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
kvm              2396 arch/s390/kvm/kvm-s390.c 		free_page((unsigned long)(kvm->arch.sca));
kvm              2397 arch/s390/kvm/kvm-s390.c 	kvm->arch.sca = NULL;
kvm              2400 arch/s390/kvm/kvm-s390.c int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm              2426 arch/s390/kvm/kvm-s390.c 	rwlock_init(&kvm->arch.sca_lock);
kvm              2428 arch/s390/kvm/kvm-s390.c 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
kvm              2429 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.sca)
kvm              2435 arch/s390/kvm/kvm-s390.c 	kvm->arch.sca = (struct bsca_block *)
kvm              2436 arch/s390/kvm/kvm-s390.c 			((char *) kvm->arch.sca + sca_offset);
kvm              2441 arch/s390/kvm/kvm-s390.c 	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
kvm              2442 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.dbf)
kvm              2446 arch/s390/kvm/kvm-s390.c 	kvm->arch.sie_page2 =
kvm              2448 arch/s390/kvm/kvm-s390.c 	if (!kvm->arch.sie_page2)
kvm              2451 arch/s390/kvm/kvm-s390.c 	kvm->arch.sie_page2->kvm = kvm;
kvm              2452 arch/s390/kvm/kvm-s390.c 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
kvm              2455 arch/s390/kvm/kvm-s390.c 		kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
kvm              2458 arch/s390/kvm/kvm-s390.c 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
kvm              2461 arch/s390/kvm/kvm-s390.c 	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
kvm              2464 arch/s390/kvm/kvm-s390.c 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
kvm              2465 arch/s390/kvm/kvm-s390.c 	set_kvm_facility(kvm->arch.model.fac_list, 138);
kvm              2467 arch/s390/kvm/kvm-s390.c 	set_kvm_facility(kvm->arch.model.fac_mask, 74);
kvm              2468 arch/s390/kvm/kvm-s390.c 	set_kvm_facility(kvm->arch.model.fac_list, 74);
kvm              2470 arch/s390/kvm/kvm-s390.c 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
kvm              2471 arch/s390/kvm/kvm-s390.c 		set_kvm_facility(kvm->arch.model.fac_list, 147);
kvm              2475 arch/s390/kvm/kvm-s390.c 		set_kvm_facility(kvm->arch.model.fac_mask, 65);
kvm              2477 arch/s390/kvm/kvm-s390.c 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
kvm              2478 arch/s390/kvm/kvm-s390.c 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
kvm              2480 arch/s390/kvm/kvm-s390.c 	kvm_s390_crypto_init(kvm);
kvm              2482 arch/s390/kvm/kvm-s390.c 	mutex_init(&kvm->arch.float_int.ais_lock);
kvm              2483 arch/s390/kvm/kvm-s390.c 	spin_lock_init(&kvm->arch.float_int.lock);
kvm              2485 arch/s390/kvm/kvm-s390.c 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
kvm              2486 arch/s390/kvm/kvm-s390.c 	init_waitqueue_head(&kvm->arch.ipte_wq);
kvm              2487 arch/s390/kvm/kvm-s390.c 	mutex_init(&kvm->arch.ipte_mutex);
kvm              2489 arch/s390/kvm/kvm-s390.c 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
kvm              2490 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "vm created with type %lu", type);
kvm              2493 arch/s390/kvm/kvm-s390.c 		kvm->arch.gmap = NULL;
kvm              2494 arch/s390/kvm/kvm-s390.c 		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
kvm              2497 arch/s390/kvm/kvm-s390.c 			kvm->arch.mem_limit = TASK_SIZE_MAX;
kvm              2499 arch/s390/kvm/kvm-s390.c 			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
kvm              2501 arch/s390/kvm/kvm-s390.c 		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
kvm              2502 arch/s390/kvm/kvm-s390.c 		if (!kvm->arch.gmap)
kvm              2504 arch/s390/kvm/kvm-s390.c 		kvm->arch.gmap->private = kvm;
kvm              2505 arch/s390/kvm/kvm-s390.c 		kvm->arch.gmap->pfault_enabled = 0;
kvm              2508 arch/s390/kvm/kvm-s390.c 	kvm->arch.use_pfmfi = sclp.has_pfmfi;
kvm              2509 arch/s390/kvm/kvm-s390.c 	kvm->arch.use_skf = sclp.has_skey;
kvm              2510 arch/s390/kvm/kvm-s390.c 	spin_lock_init(&kvm->arch.start_stop_lock);
kvm              2511 arch/s390/kvm/kvm-s390.c 	kvm_s390_vsie_init(kvm);
kvm              2512 arch/s390/kvm/kvm-s390.c 	kvm_s390_gisa_init(kvm);
kvm              2513 arch/s390/kvm/kvm-s390.c 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
kvm              2517 arch/s390/kvm/kvm-s390.c 	free_page((unsigned long)kvm->arch.sie_page2);
kvm              2518 arch/s390/kvm/kvm-s390.c 	debug_unregister(kvm->arch.dbf);
kvm              2519 arch/s390/kvm/kvm-s390.c 	sca_dispose(kvm);
kvm              2530 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm))
kvm              2533 arch/s390/kvm/kvm-s390.c 	if (kvm_is_ucontrol(vcpu->kvm))
kvm              2536 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_cmma)
kvm              2544 arch/s390/kvm/kvm-s390.c static void kvm_free_vcpus(struct kvm *kvm)
kvm              2549 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              2552 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              2553 arch/s390/kvm/kvm-s390.c 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm              2554 arch/s390/kvm/kvm-s390.c 		kvm->vcpus[i] = NULL;
kvm              2556 arch/s390/kvm/kvm-s390.c 	atomic_set(&kvm->online_vcpus, 0);
kvm              2557 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              2560 arch/s390/kvm/kvm-s390.c void kvm_arch_destroy_vm(struct kvm *kvm)
kvm              2562 arch/s390/kvm/kvm-s390.c 	kvm_free_vcpus(kvm);
kvm              2563 arch/s390/kvm/kvm-s390.c 	sca_dispose(kvm);
kvm              2564 arch/s390/kvm/kvm-s390.c 	debug_unregister(kvm->arch.dbf);
kvm              2565 arch/s390/kvm/kvm-s390.c 	kvm_s390_gisa_destroy(kvm);
kvm              2566 arch/s390/kvm/kvm-s390.c 	free_page((unsigned long)kvm->arch.sie_page2);
kvm              2567 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(kvm))
kvm              2568 arch/s390/kvm/kvm-s390.c 		gmap_remove(kvm->arch.gmap);
kvm              2569 arch/s390/kvm/kvm-s390.c 	kvm_s390_destroy_adapters(kvm);
kvm              2570 arch/s390/kvm/kvm-s390.c 	kvm_s390_clear_float_irqs(kvm);
kvm              2571 arch/s390/kvm/kvm-s390.c 	kvm_s390_vsie_destroy(kvm);
kvm              2572 arch/s390/kvm/kvm-s390.c 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
kvm              2581 arch/s390/kvm/kvm-s390.c 	vcpu->arch.gmap->private = vcpu->kvm;
kvm              2590 arch/s390/kvm/kvm-s390.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm              2591 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_esca) {
kvm              2592 arch/s390/kvm/kvm-s390.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
kvm              2597 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm              2602 arch/s390/kvm/kvm-s390.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm              2608 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm              2615 arch/s390/kvm/kvm-s390.c 	read_lock(&vcpu->kvm->arch.sca_lock);
kvm              2616 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_esca) {
kvm              2617 arch/s390/kvm/kvm-s390.c 		struct esca_block *sca = vcpu->kvm->arch.sca;
kvm              2625 arch/s390/kvm/kvm-s390.c 		struct bsca_block *sca = vcpu->kvm->arch.sca;
kvm              2632 arch/s390/kvm/kvm-s390.c 	read_unlock(&vcpu->kvm->arch.sca_lock);
kvm              2653 arch/s390/kvm/kvm-s390.c static int sca_switch_to_extended(struct kvm *kvm)
kvm              2655 arch/s390/kvm/kvm-s390.c 	struct bsca_block *old_sca = kvm->arch.sca;
kvm              2668 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_block_all(kvm);
kvm              2669 arch/s390/kvm/kvm-s390.c 	write_lock(&kvm->arch.sca_lock);
kvm              2673 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
kvm              2678 arch/s390/kvm/kvm-s390.c 	kvm->arch.sca = new_sca;
kvm              2679 arch/s390/kvm/kvm-s390.c 	kvm->arch.use_esca = 1;
kvm              2681 arch/s390/kvm/kvm-s390.c 	write_unlock(&kvm->arch.sca_lock);
kvm              2682 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_unblock_all(kvm);
kvm              2686 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
kvm              2687 arch/s390/kvm/kvm-s390.c 		 old_sca, kvm->arch.sca);
kvm              2691 arch/s390/kvm/kvm-s390.c static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
kvm              2705 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              2706 arch/s390/kvm/kvm-s390.c 	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
kvm              2707 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              2723 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 64))
kvm              2725 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 82))
kvm              2727 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 133))
kvm              2729 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 156))
kvm              2739 arch/s390/kvm/kvm-s390.c 	if (kvm_is_ucontrol(vcpu->kvm))
kvm              2875 arch/s390/kvm/kvm-s390.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
kvm              2882 arch/s390/kvm/kvm-s390.c 	mutex_lock(&vcpu->kvm->lock);
kvm              2884 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
kvm              2885 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
kvm              2887 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&vcpu->kvm->lock);
kvm              2888 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm)) {
kvm              2889 arch/s390/kvm/kvm-s390.c 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
kvm              2892 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
kvm              2898 arch/s390/kvm/kvm-s390.c static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
kvm              2900 arch/s390/kvm/kvm-s390.c 	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
kvm              2906 arch/s390/kvm/kvm-s390.c static bool kvm_has_pckmo_ecc(struct kvm *kvm)
kvm              2909 arch/s390/kvm/kvm-s390.c 	return kvm_has_pckmo_subfunc(kvm, 32) ||
kvm              2910 arch/s390/kvm/kvm-s390.c 	       kvm_has_pckmo_subfunc(kvm, 33) ||
kvm              2911 arch/s390/kvm/kvm-s390.c 	       kvm_has_pckmo_subfunc(kvm, 34) ||
kvm              2912 arch/s390/kvm/kvm-s390.c 	       kvm_has_pckmo_subfunc(kvm, 40) ||
kvm              2913 arch/s390/kvm/kvm-s390.c 	       kvm_has_pckmo_subfunc(kvm, 41);
kvm              2923 arch/s390/kvm/kvm-s390.c 	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
kvm              2926 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
kvm              2931 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.apie)
kvm              2935 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.aes_kw) {
kvm              2938 arch/s390/kvm/kvm-s390.c 		if (kvm_has_pckmo_ecc(vcpu->kvm))
kvm              2942 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.crypto.dea_kw)
kvm              2962 arch/s390/kvm/kvm-s390.c 	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
kvm              2965 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 7))
kvm              2977 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 78))
kvm              2979 arch/s390/kvm/kvm-s390.c 	else if (test_kvm_facility(vcpu->kvm, 8))
kvm              2987 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 9))
kvm              2989 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 73))
kvm              2992 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
kvm              2994 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 130))
kvm              3005 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 129)) {
kvm              3009 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 139))
kvm              3011 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(vcpu->kvm, 156))
kvm              3027 arch/s390/kvm/kvm-s390.c 	if (vcpu->kvm->arch.use_cmma) {
kvm              3042 arch/s390/kvm/kvm-s390.c struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
kvm              3049 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
kvm              3072 arch/s390/kvm/kvm-s390.c 	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
kvm              3077 arch/s390/kvm/kvm-s390.c 	rc = kvm_vcpu_init(vcpu, kvm, id);
kvm              3080 arch/s390/kvm/kvm-s390.c 	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
kvm              3153 arch/s390/kvm/kvm-s390.c 	struct kvm *kvm = gmap->private;
kvm              3163 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              3467 arch/s390/kvm/kvm-s390.c 	vcpu->kvm->arch.user_cpu_state_ctrl = 1;
kvm              3558 arch/s390/kvm/kvm-s390.c 		if ((vcpu->kvm->arch.use_cmma) &&
kvm              3559 arch/s390/kvm/kvm-s390.c 		    (vcpu->kvm->mm->context.uses_cmm))
kvm              3572 arch/s390/kvm/kvm-s390.c void kvm_s390_set_tod_clock(struct kvm *kvm,
kvm              3579 arch/s390/kvm/kvm-s390.c 	mutex_lock(&kvm->lock);
kvm              3584 arch/s390/kvm/kvm-s390.c 	kvm->arch.epoch = gtod->tod - htod.tod;
kvm              3585 arch/s390/kvm/kvm-s390.c 	kvm->arch.epdx = 0;
kvm              3586 arch/s390/kvm/kvm-s390.c 	if (test_kvm_facility(kvm, 139)) {
kvm              3587 arch/s390/kvm/kvm-s390.c 		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
kvm              3588 arch/s390/kvm/kvm-s390.c 		if (kvm->arch.epoch > gtod->tod)
kvm              3589 arch/s390/kvm/kvm-s390.c 			kvm->arch.epdx -= 1;
kvm              3592 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_block_all(kvm);
kvm              3593 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              3594 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
kvm              3595 arch/s390/kvm/kvm-s390.c 		vcpu->arch.sie_block->epdx  = kvm->arch.epdx;
kvm              3598 arch/s390/kvm/kvm-s390.c 	kvm_s390_vcpu_unblock_all(kvm);
kvm              3600 arch/s390/kvm/kvm-s390.c 	mutex_unlock(&kvm->lock);
kvm              3632 arch/s390/kvm/kvm-s390.c 		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
kvm              3685 arch/s390/kvm/kvm-s390.c 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
kvm              3714 arch/s390/kvm/kvm-s390.c 	if (!kvm_is_ucontrol(vcpu->kvm)) {
kvm              3729 arch/s390/kvm/kvm-s390.c 	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
kvm              3812 arch/s390/kvm/kvm-s390.c 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
kvm              3836 arch/s390/kvm/kvm-s390.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              3843 arch/s390/kvm/kvm-s390.c 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              3858 arch/s390/kvm/kvm-s390.c 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              3863 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              3902 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 64) &&
kvm              3913 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 133) &&
kvm              3922 arch/s390/kvm/kvm-s390.c 	    test_kvm_facility(vcpu->kvm, 82)) {
kvm              4018 arch/s390/kvm/kvm-s390.c 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm              4137 arch/s390/kvm/kvm-s390.c static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
kvm              4142 arch/s390/kvm/kvm-s390.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              4164 arch/s390/kvm/kvm-s390.c 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
kvm              4165 arch/s390/kvm/kvm-s390.c 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
kvm              4168 arch/s390/kvm/kvm-s390.c 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
kvm              4181 arch/s390/kvm/kvm-s390.c 		__disable_ibs_on_all_vcpus(vcpu->kvm);
kvm              4190 arch/s390/kvm/kvm-s390.c 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
kvm              4204 arch/s390/kvm/kvm-s390.c 	spin_lock(&vcpu->kvm->arch.start_stop_lock);
kvm              4205 arch/s390/kvm/kvm-s390.c 	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
kvm              4214 arch/s390/kvm/kvm-s390.c 		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
kvm              4216 arch/s390/kvm/kvm-s390.c 			started_vcpu = vcpu->kvm->vcpus[i];
kvm              4228 arch/s390/kvm/kvm-s390.c 	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
kvm              4242 arch/s390/kvm/kvm-s390.c 		if (!vcpu->kvm->arch.css_support) {
kvm              4243 arch/s390/kvm/kvm-s390.c 			vcpu->kvm->arch.css_support = 1;
kvm              4244 arch/s390/kvm/kvm-s390.c 			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
kvm              4245 arch/s390/kvm/kvm-s390.c 			trace_kvm_s390_enable_css(vcpu->kvm);
kvm              4277 arch/s390/kvm/kvm-s390.c 	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4308 arch/s390/kvm/kvm-s390.c 	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              4357 arch/s390/kvm/kvm-s390.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4359 arch/s390/kvm/kvm-s390.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              4394 arch/s390/kvm/kvm-s390.c 		if (!kvm_is_ucontrol(vcpu->kvm)) {
kvm              4411 arch/s390/kvm/kvm-s390.c 		if (!kvm_is_ucontrol(vcpu->kvm)) {
kvm              4489 arch/s390/kvm/kvm-s390.c 		 && (kvm_is_ucontrol(vcpu->kvm))) {
kvm              4498 arch/s390/kvm/kvm-s390.c int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm              4505 arch/s390/kvm/kvm-s390.c int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm              4521 arch/s390/kvm/kvm-s390.c 	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
kvm              4527 arch/s390/kvm/kvm-s390.c void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm              4537 arch/s390/kvm/kvm-s390.c 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
kvm              4541 arch/s390/kvm/kvm-s390.c 		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
kvm              4547 arch/s390/kvm/kvm-s390.c 		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
kvm                42 arch/s390/kvm/kvm-s390.h 	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
kvm                70 arch/s390/kvm/kvm-s390.h 	return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
kvm                73 arch/s390/kvm/kvm-s390.h static inline int kvm_is_ucontrol(struct kvm *kvm)
kvm                76 arch/s390/kvm/kvm-s390.h 	if (kvm->arch.gmap)
kvm               170 arch/s390/kvm/kvm-s390.h static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
kvm               172 arch/s390/kvm/kvm-s390.h 	return __test_facility(nr, kvm->arch.model.fac_mask) &&
kvm               173 arch/s390/kvm/kvm-s390.h 		__test_facility(nr, kvm->arch.model.fac_list);
kvm               187 arch/s390/kvm/kvm-s390.h static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
kvm               190 arch/s390/kvm/kvm-s390.h 	return test_bit_inv(nr, kvm->arch.cpu_feat);
kvm               194 arch/s390/kvm/kvm-s390.h static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
kvm               196 arch/s390/kvm/kvm-s390.h 	return kvm->arch.user_cpu_state_ctrl != 0;
kvm               205 arch/s390/kvm/kvm-s390.h void kvm_s390_clear_float_irqs(struct kvm *kvm);
kvm               206 arch/s390/kvm/kvm-s390.h int __must_check kvm_s390_inject_vm(struct kvm *kvm,
kvm               229 arch/s390/kvm/kvm-s390.h struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
kvm               231 arch/s390/kvm/kvm-s390.h int kvm_s390_reinject_io_int(struct kvm *kvm,
kvm               233 arch/s390/kvm/kvm-s390.h int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
kvm               276 arch/s390/kvm/kvm-s390.h void kvm_s390_vsie_init(struct kvm *kvm);
kvm               277 arch/s390/kvm/kvm-s390.h void kvm_s390_vsie_destroy(struct kvm *kvm);
kvm               284 arch/s390/kvm/kvm-s390.h void kvm_s390_set_tod_clock(struct kvm *kvm,
kvm               304 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
kvm               309 arch/s390/kvm/kvm-s390.h 	WARN_ON(!mutex_is_locked(&kvm->lock));
kvm               310 arch/s390/kvm/kvm-s390.h 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               314 arch/s390/kvm/kvm-s390.h static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
kvm               319 arch/s390/kvm/kvm-s390.h 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               323 arch/s390/kvm/kvm-s390.h static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
kvm               328 arch/s390/kvm/kvm-s390.h 	rc = get_tod_clock_fast() + kvm->arch.epoch;
kvm               372 arch/s390/kvm/kvm-s390.h void kvm_s390_destroy_adapters(struct kvm *kvm);
kvm               381 arch/s390/kvm/kvm-s390.h void kvm_s390_gisa_init(struct kvm *kvm);
kvm               382 arch/s390/kvm/kvm-s390.h void kvm_s390_gisa_clear(struct kvm *kvm);
kvm               383 arch/s390/kvm/kvm-s390.h void kvm_s390_gisa_destroy(struct kvm *kvm);
kvm               399 arch/s390/kvm/kvm-s390.h static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
kvm               401 arch/s390/kvm/kvm-s390.h 	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
kvm               428 arch/s390/kvm/kvm-s390.h void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);
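
The kvm-s390.h helpers above are what the TOD-clock update in kvm-s390.c (lines 3592-3598 of the listing) leans on: every vCPU is forced out of SIE under kvm->lock, the per-vCPU epoch fields are rewritten, then the vCPUs are released. A minimal sketch of that pattern, using only fields visible in the listing; the function name is hypothetical:

	/* Sketch: rewrite a per-vCPU SIE field consistently on all vCPUs.
	 * kvm_s390_vcpu_block_all() WARNs unless kvm->lock is held, so the
	 * caller is assumed to hold it. */
	static void set_epoch_all_vcpus(struct kvm *kvm, u64 epoch)
	{
		struct kvm_vcpu *vcpu;
		int i;

		kvm_s390_vcpu_block_all(kvm);	/* kick vCPUs out of SIE */
		kvm_for_each_vcpu(i, vcpu, kvm)
			vcpu->arch.sie_block->epoch = epoch;
		kvm_s390_vcpu_unblock_all(kvm);	/* allow SIE reentry */
	}
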
kvm                39 arch/s390/kvm/priv.c 	if (test_kvm_facility(vcpu->kvm, 64)) {
kvm                60 arch/s390/kvm/priv.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
kvm               106 arch/s390/kvm/priv.c 	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
kvm               142 arch/s390/kvm/priv.c 	if (kvm_is_error_gpa(vcpu->kvm, address))
kvm               222 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.use_skf)
kvm               237 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.use_skf) {
kvm               268 arch/s390/kvm/priv.c 	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
kvm               315 arch/s390/kvm/priv.c 	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
kvm               361 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 8))
kvm               363 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 10))
kvm               365 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 14))
kvm               382 arch/s390/kvm/priv.c 		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
kvm               432 arch/s390/kvm/priv.c 	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
kvm               455 arch/s390/kvm/priv.c 	if (kvm_is_error_gpa(vcpu->kvm, addr))
kvm               461 arch/s390/kvm/priv.c 	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
kvm               483 arch/s390/kvm/priv.c 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
kvm               526 arch/s390/kvm/priv.c 	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kvm               543 arch/s390/kvm/priv.c 		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
kvm               574 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.css_support) {
kvm               646 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
kvm               649 arch/s390/kvm/priv.c 	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
kvm               654 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 65))
kvm               661 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.crypto.pqap_hook) {
kvm               662 arch/s390/kvm/priv.c 		if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
kvm               664 arch/s390/kvm/priv.c 		ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
kvm               665 arch/s390/kvm/priv.c 		module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
kvm               695 arch/s390/kvm/priv.c 	fac = *vcpu->kvm->arch.model.fac_list >> 32;
kvm               783 arch/s390/kvm/priv.c 	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
kvm               811 arch/s390/kvm/priv.c 	cpus = atomic_read(&vcpu->kvm->online_vcpus);
kvm               904 arch/s390/kvm/priv.c 	if (vcpu->kvm->arch.user_stsi) {
kvm              1018 arch/s390/kvm/priv.c 	if (!test_kvm_facility(vcpu->kvm, 8))
kvm              1029 arch/s390/kvm/priv.c 	    !test_kvm_facility(vcpu->kvm, 14))
kvm              1034 arch/s390/kvm/priv.c 	    test_kvm_facility(vcpu->kvm, 10)) {
kvm              1061 arch/s390/kvm/priv.c 		if (!test_kvm_facility(vcpu->kvm, 78) ||
kvm              1075 arch/s390/kvm/priv.c 		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
kvm              1080 arch/s390/kvm/priv.c 			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
kvm              1135 arch/s390/kvm/priv.c 	hva = gfn_to_hva(vcpu->kvm, gfn);
kvm              1141 arch/s390/kvm/priv.c 	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
kvm              1174 arch/s390/kvm/priv.c 		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
kvm              1178 arch/s390/kvm/priv.c 			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
kvm              1195 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.use_cmma)
kvm              1203 arch/s390/kvm/priv.c 	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
kvm              1207 arch/s390/kvm/priv.c 	if (!vcpu->kvm->arch.migration_mode) {
kvm              1217 arch/s390/kvm/priv.c 		if (vcpu->kvm->mm->context.uses_cmm == 0) {
kvm              1218 arch/s390/kvm/priv.c 			down_write(&vcpu->kvm->mm->mmap_sem);
kvm              1219 arch/s390/kvm/priv.c 			vcpu->kvm->mm->context.uses_cmm = 1;
kvm              1220 arch/s390/kvm/priv.c 			up_write(&vcpu->kvm->mm->mmap_sem);
kvm              1237 arch/s390/kvm/priv.c 		down_read(&vcpu->kvm->mm->mmap_sem);
kvm              1238 arch/s390/kvm/priv.c 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1240 arch/s390/kvm/priv.c 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
kvm              1241 arch/s390/kvm/priv.c 		up_read(&vcpu->kvm->mm->mmap_sem);
kvm              1475 arch/s390/kvm/priv.c 	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
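
Several of the priv.c handlers above share one guest-memory idiom: validate a guest physical address with kvm_is_error_gpa(), then operate on it via kvm_clear_guest() or translate it with gfn_to_hva(gpa_to_gfn(...)). A minimal sketch of the page-clear variant (cf. priv.c:455-461 in the listing); the handler name is hypothetical:

	/* Sketch: validate and zero one guest page. PGM_ADDRESSING is the
	 * s390 program-check code the real handlers inject on a bad gpa. */
	static int clear_guest_page(struct kvm_vcpu *vcpu, gpa_t addr)
	{
		if (kvm_is_error_gpa(vcpu->kvm, addr))
			return PGM_ADDRESSING;	/* invalid guest address */
		if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
			return -EFAULT;		/* host-side access failed */
		return 0;
	}
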
kvm               158 arch/s390/kvm/sigp.c 	kvm_for_each_vcpu(i, v, vcpu->kvm) {
kvm               187 arch/s390/kvm/sigp.c 	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
kvm               230 arch/s390/kvm/sigp.c 	if (!test_kvm_facility(vcpu->kvm, 9)) {
kvm               286 arch/s390/kvm/sigp.c 	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
kvm               362 arch/s390/kvm/sigp.c 	if (!vcpu->kvm->arch.user_sigp)
kvm               470 arch/s390/kvm/sigp.c 		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
kvm                 8 arch/s390/kvm/trace-s390.h #define TRACE_SYSTEM kvm-s390
kvm               247 arch/s390/kvm/trace-s390.h 	    TP_PROTO(void *kvm),
kvm               248 arch/s390/kvm/trace-s390.h 	    TP_ARGS(kvm),
kvm               251 arch/s390/kvm/trace-s390.h 		    __field(void *, kvm)
kvm               255 arch/s390/kvm/trace-s390.h 		    __entry->kvm = kvm;
kvm               259 arch/s390/kvm/trace-s390.h 		      __entry->kvm)
kvm                11 arch/s390/kvm/trace.h #define TRACE_SYSTEM kvm
kvm               119 arch/s390/kvm/vsie.c 	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
kvm               121 arch/s390/kvm/vsie.c 	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
kvm               126 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
kvm               128 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
kvm               130 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
kvm               132 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
kvm               294 arch/s390/kvm/vsie.c 	int key_msk = test_kvm_facility(vcpu->kvm, 76);
kvm               316 arch/s390/kvm/vsie.c 				 vcpu->kvm->arch.crypto.crycb,
kvm               341 arch/s390/kvm/vsie.c 			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
kvm               369 arch/s390/kvm/vsie.c 	if (vcpu->kvm->arch.model.ibc && new_ibc) {
kvm               375 arch/s390/kvm/vsie.c 		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
kvm               376 arch/s390/kvm/vsie.c 			scb_s->ibc = vcpu->kvm->arch.model.ibc;
kvm               406 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82)) {
kvm               508 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
kvm               511 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
kvm               518 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82))
kvm               521 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 129)) {
kvm               526 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 64))
kvm               529 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 130))
kvm               532 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 133)) {
kvm               536 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
kvm               538 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
kvm               540 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
kvm               543 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 139))
kvm               547 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 156))
kvm               563 arch/s390/kvm/vsie.c 	struct kvm *kvm = gmap->private;
kvm               579 arch/s390/kvm/vsie.c 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
kvm               580 arch/s390/kvm/vsie.c 		page = READ_ONCE(kvm->arch.vsie.pages[i]);
kvm               643 arch/s390/kvm/vsie.c static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
kvm               647 arch/s390/kvm/vsie.c 	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
kvm               655 arch/s390/kvm/vsie.c static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
kvm               659 arch/s390/kvm/vsie.c 	mark_page_dirty(kvm, gpa_to_gfn(gpa));
kvm               670 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
kvm               678 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
kvm               685 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
kvm               692 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
kvm               699 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
kvm               728 arch/s390/kvm/vsie.c 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
kvm               739 arch/s390/kvm/vsie.c 			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               757 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               776 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               792 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               822 arch/s390/kvm/vsie.c 		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               843 arch/s390/kvm/vsie.c 		unpin_guest_page(vcpu->kvm, gpa, hpa);
kvm               859 arch/s390/kvm/vsie.c 	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
kvm               974 arch/s390/kvm/vsie.c 	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
kvm               993 arch/s390/kvm/vsie.c 	__releases(vcpu->kvm->srcu)
kvm               994 arch/s390/kvm/vsie.c 	__acquires(vcpu->kvm->srcu)
kvm              1008 arch/s390/kvm/vsie.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              1019 arch/s390/kvm/vsie.c 	if (test_kvm_facility(vcpu->kvm, 82) &&
kvm              1048 arch/s390/kvm/vsie.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1097 arch/s390/kvm/vsie.c 	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
kvm              1098 arch/s390/kvm/vsie.c 	edat += edat && test_kvm_facility(vcpu->kvm, 78);
kvm              1113 arch/s390/kvm/vsie.c 	gmap->private = vcpu->kvm;
kvm              1137 arch/s390/kvm/vsie.c 	scb_s->epoch += vcpu->kvm->arch.epoch;
kvm              1140 arch/s390/kvm/vsie.c 		scb_s->epdx += vcpu->kvm->arch.epdx;
kvm              1141 arch/s390/kvm/vsie.c 		if (scb_s->epoch < vcpu->kvm->arch.epoch)
kvm              1217 arch/s390/kvm/vsie.c static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
kvm              1224 arch/s390/kvm/vsie.c 	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
kvm              1236 arch/s390/kvm/vsie.c 	nr_vcpus = atomic_read(&kvm->online_vcpus);
kvm              1238 arch/s390/kvm/vsie.c 	mutex_lock(&kvm->arch.vsie.mutex);
kvm              1239 arch/s390/kvm/vsie.c 	if (kvm->arch.vsie.page_count < nr_vcpus) {
kvm              1242 arch/s390/kvm/vsie.c 			mutex_unlock(&kvm->arch.vsie.mutex);
kvm              1246 arch/s390/kvm/vsie.c 		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
kvm              1247 arch/s390/kvm/vsie.c 		kvm->arch.vsie.page_count++;
kvm              1251 arch/s390/kvm/vsie.c 			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
kvm              1255 arch/s390/kvm/vsie.c 			kvm->arch.vsie.next++;
kvm              1256 arch/s390/kvm/vsie.c 			kvm->arch.vsie.next %= nr_vcpus;
kvm              1258 arch/s390/kvm/vsie.c 		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
kvm              1262 arch/s390/kvm/vsie.c 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
kvm              1264 arch/s390/kvm/vsie.c 		mutex_unlock(&kvm->arch.vsie.mutex);
kvm              1267 arch/s390/kvm/vsie.c 	mutex_unlock(&kvm->arch.vsie.mutex);
kvm              1278 arch/s390/kvm/vsie.c static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
kvm              1292 arch/s390/kvm/vsie.c 	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
kvm              1308 arch/s390/kvm/vsie.c 	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
kvm              1333 arch/s390/kvm/vsie.c 	put_vsie_page(vcpu->kvm, vsie_page);
kvm              1339 arch/s390/kvm/vsie.c void kvm_s390_vsie_init(struct kvm *kvm)
kvm              1341 arch/s390/kvm/vsie.c 	mutex_init(&kvm->arch.vsie.mutex);
kvm              1342 arch/s390/kvm/vsie.c 	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
kvm              1346 arch/s390/kvm/vsie.c void kvm_s390_vsie_destroy(struct kvm *kvm)
kvm              1352 arch/s390/kvm/vsie.c 	mutex_lock(&kvm->arch.vsie.mutex);
kvm              1353 arch/s390/kvm/vsie.c 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
kvm              1354 arch/s390/kvm/vsie.c 		page = kvm->arch.vsie.pages[i];
kvm              1355 arch/s390/kvm/vsie.c 		kvm->arch.vsie.pages[i] = NULL;
kvm              1359 arch/s390/kvm/vsie.c 		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
kvm              1362 arch/s390/kvm/vsie.c 	kvm->arch.vsie.page_count = 0;
kvm              1363 arch/s390/kvm/vsie.c 	mutex_unlock(&kvm->arch.vsie.mutex);
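
The vsie.c entries describe a small per-VM cache of shadow SCB pages: a radix tree keyed by the guest SCB address shifted right by 9 (an SCB is 512 bytes, so the low 9 bits carry no information), capped at the online-vCPU count, with round-robin reuse through arch.vsie.next. A lookup-only sketch of the cache-hit path, mirroring get_vsie_page() at vsie.c:1217 in the listing; allocation and eviction are omitted:

	/* Sketch: look up a cached vsie page by guest SCB address. */
	static struct page *lookup_vsie_page(struct kvm *kvm, unsigned long addr)
	{
		struct page *page;

		rcu_read_lock();
		page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
		rcu_read_unlock();
		return page;	/* NULL on a cache miss */
	}
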
kvm              1020 arch/x86/include/asm/kvm_host.h 	struct kvm *(*vm_alloc)(void);
kvm              1021 arch/x86/include/asm/kvm_host.h 	void (*vm_free)(struct kvm *);
kvm              1022 arch/x86/include/asm/kvm_host.h 	int (*vm_init)(struct kvm *kvm);
kvm              1023 arch/x86/include/asm/kvm_host.h 	void (*vm_destroy)(struct kvm *kvm);
kvm              1026 arch/x86/include/asm/kvm_host.h 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
kvm              1064 arch/x86/include/asm/kvm_host.h 	int  (*tlb_remote_flush)(struct kvm *kvm);
kvm              1065 arch/x86/include/asm/kvm_host.h 	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
kvm              1104 arch/x86/include/asm/kvm_host.h 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
kvm              1105 arch/x86/include/asm/kvm_host.h 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
kvm              1137 arch/x86/include/asm/kvm_host.h 	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
kvm              1155 arch/x86/include/asm/kvm_host.h 	void (*slot_enable_log_dirty)(struct kvm *kvm,
kvm              1157 arch/x86/include/asm/kvm_host.h 	void (*slot_disable_log_dirty)(struct kvm *kvm,
kvm              1159 arch/x86/include/asm/kvm_host.h 	void (*flush_log_dirty)(struct kvm *kvm);
kvm              1160 arch/x86/include/asm/kvm_host.h 	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
kvm              1183 arch/x86/include/asm/kvm_host.h 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
kvm              1207 arch/x86/include/asm/kvm_host.h 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
kvm              1208 arch/x86/include/asm/kvm_host.h 	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
kvm              1209 arch/x86/include/asm/kvm_host.h 	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
kvm              1234 arch/x86/include/asm/kvm_host.h static inline struct kvm *kvm_arch_alloc_vm(void)
kvm              1239 arch/x86/include/asm/kvm_host.h static inline void kvm_arch_free_vm(struct kvm *kvm)
kvm              1241 arch/x86/include/asm/kvm_host.h 	return kvm_x86_ops->vm_free(kvm);
kvm              1245 arch/x86/include/asm/kvm_host.h static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
kvm              1248 arch/x86/include/asm/kvm_host.h 	    !kvm_x86_ops->tlb_remote_flush(kvm))
kvm              1259 arch/x86/include/asm/kvm_host.h void kvm_mmu_init_vm(struct kvm *kvm);
kvm              1260 arch/x86/include/asm/kvm_host.h void kvm_mmu_uninit_vm(struct kvm *kvm);
kvm              1266 arch/x86/include/asm/kvm_host.h void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
kvm              1268 arch/x86/include/asm/kvm_host.h void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
kvm              1270 arch/x86/include/asm/kvm_host.h void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
kvm              1272 arch/x86/include/asm/kvm_host.h void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
kvm              1274 arch/x86/include/asm/kvm_host.h void kvm_mmu_slot_set_dirty(struct kvm *kvm,
kvm              1276 arch/x86/include/asm/kvm_host.h void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
kvm              1279 arch/x86/include/asm/kvm_host.h void kvm_mmu_zap_all(struct kvm *kvm);
kvm              1280 arch/x86/include/asm/kvm_host.h void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
kvm              1281 arch/x86/include/asm/kvm_host.h unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
kvm              1282 arch/x86/include/asm/kvm_host.h void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
kvm              1296 arch/x86/include/asm/kvm_host.h void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
kvm              1298 arch/x86/include/asm/kvm_host.h void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
kvm              1300 arch/x86/include/asm/kvm_host.h void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
kvm              1432 arch/x86/include/asm/kvm_host.h int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
kvm              1536 arch/x86/include/asm/kvm_host.h #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
kvm              1556 arch/x86/include/asm/kvm_host.h int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
kvm              1557 arch/x86/include/asm/kvm_host.h int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
kvm              1558 arch/x86/include/asm/kvm_host.h int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
kvm              1559 arch/x86/include/asm/kvm_host.h int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
kvm              1567 arch/x86/include/asm/kvm_host.h int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
kvm              1580 arch/x86/include/asm/kvm_host.h void kvm_make_mclock_inprogress_request(struct kvm *kvm);
kvm              1581 arch/x86/include/asm/kvm_host.h void kvm_make_scan_ioapic_request(struct kvm *kvm);
kvm              1598 arch/x86/include/asm/kvm_host.h int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
kvm              1599 arch/x86/include/asm/kvm_host.h int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
kvm              1603 arch/x86/include/asm/kvm_host.h bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
kvm              1606 arch/x86/include/asm/kvm_host.h void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
kvm                45 arch/x86/include/asm/kvm_page_track.h 	void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm                49 arch/x86/include/asm/kvm_page_track.h void kvm_page_track_init(struct kvm *kvm);
kvm                50 arch/x86/include/asm/kvm_page_track.h void kvm_page_track_cleanup(struct kvm *kvm);
kvm                57 arch/x86/include/asm/kvm_page_track.h void kvm_slot_page_track_add_page(struct kvm *kvm,
kvm                60 arch/x86/include/asm/kvm_page_track.h void kvm_slot_page_track_remove_page(struct kvm *kvm,
kvm                67 arch/x86/include/asm/kvm_page_track.h kvm_page_track_register_notifier(struct kvm *kvm,
kvm                70 arch/x86/include/asm/kvm_page_track.h kvm_page_track_unregister_notifier(struct kvm *kvm,
kvm                74 arch/x86/include/asm/kvm_page_track.h void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
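
The page-track API above gives MMU clients a callback when a tracked guest page is written or a memslot is flushed; the shadow MMU uses it to keep shadow page tables coherent. A minimal registration sketch, assuming the notifier-node layout of this kernel generation; the callback body is a placeholder:

	#include <asm/kvm_page_track.h>

	static void my_track_flush_slot(struct kvm *kvm,
					struct kvm_memory_slot *slot,
					struct kvm_page_track_notifier_node *node)
	{
		/* drop any state derived from this slot's gfn range */
	}

	static struct kvm_page_track_notifier_node my_node = {
		.track_flush_slot = my_track_flush_slot,
	};

	static void my_setup(struct kvm *kvm)
	{
		kvm_page_track_register_notifier(kvm, &my_node);
	}
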
kvm               133 arch/x86/kvm/cpuid.c 	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
kvm               137 arch/x86/kvm/cpuid.c 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
kvm               133 arch/x86/kvm/hyperv.c static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
kvm               141 arch/x86/kvm/hyperv.c 	vcpu = kvm_get_vcpu(kvm, vpidx);
kvm               144 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               150 arch/x86/kvm/hyperv.c static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
kvm               155 arch/x86/kvm/hyperv.c 	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
kvm               164 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
kvm               181 arch/x86/kvm/hyperv.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               184 arch/x86/kvm/hyperv.c 		kvm_notify_acked_gsi(kvm, gsi);
kvm               185 arch/x86/kvm/hyperv.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               230 arch/x86/kvm/hyperv.c 			if (kvm_clear_guest(vcpu->kvm,
kvm               242 arch/x86/kvm/hyperv.c 			if (kvm_clear_guest(vcpu->kvm,
kvm               323 arch/x86/kvm/hyperv.c 	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
kvm               328 arch/x86/kvm/hyperv.c int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
kvm               332 arch/x86/kvm/hyperv.c 	synic = synic_get(kvm, vpidx);
kvm               351 arch/x86/kvm/hyperv.c static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
kvm               355 arch/x86/kvm/hyperv.c 	synic = synic_get(kvm, vpidx);
kvm               366 arch/x86/kvm/hyperv.c void kvm_hv_irq_routing_update(struct kvm *kvm)
kvm               372 arch/x86/kvm/hyperv.c 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
kvm               373 arch/x86/kvm/hyperv.c 					lockdep_is_held(&kvm->irq_lock));
kvm               378 arch/x86/kvm/hyperv.c 				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
kvm               396 arch/x86/kvm/hyperv.c static u64 get_time_ref_counter(struct kvm *kvm)
kvm               398 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm               407 arch/x86/kvm/hyperv.c 		return div_u64(get_kvmclock_ns(kvm), 100);
kvm               409 arch/x86/kvm/hyperv.c 	vcpu = kvm_get_vcpu(kvm, 0);
kvm               463 arch/x86/kvm/hyperv.c 	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
kvm               635 arch/x86/kvm/hyperv.c 	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
kvm               687 arch/x86/kvm/hyperv.c 						get_time_ref_counter(vcpu->kvm);
kvm               724 arch/x86/kvm/hyperv.c 	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
kvm               811 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
kvm               823 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
kvm               831 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
kvm               855 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
kvm               934 arch/x86/kvm/hyperv.c void kvm_hv_setup_tsc_page(struct kvm *kvm,
kvm               937 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm               947 arch/x86/kvm/hyperv.c 	mutex_lock(&kvm->arch.hyperv.hv_lock);
kvm               956 arch/x86/kvm/hyperv.c 	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
kvm               965 arch/x86/kvm/hyperv.c 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
kvm               974 arch/x86/kvm/hyperv.c 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
kvm               988 arch/x86/kvm/hyperv.c 	kvm_write_guest(kvm, gfn_to_gpa(gfn),
kvm               991 arch/x86/kvm/hyperv.c 	mutex_unlock(&kvm->arch.hyperv.hv_lock);
kvm               997 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
kvm               998 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm              1020 arch/x86/kvm/hyperv.c 		addr = gfn_to_hva(kvm, gfn);
kvm              1028 arch/x86/kvm/hyperv.c 		mark_page_dirty(kvm, gfn);
kvm              1086 arch/x86/kvm/hyperv.c 		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
kvm              1194 arch/x86/kvm/hyperv.c 	struct kvm *kvm = vcpu->kvm;
kvm              1195 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm              1205 arch/x86/kvm/hyperv.c 		data = get_time_ref_counter(kvm);
kvm              1303 arch/x86/kvm/hyperv.c 		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
kvm              1305 arch/x86/kvm/hyperv.c 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
kvm              1316 arch/x86/kvm/hyperv.c 		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
kvm              1318 arch/x86/kvm/hyperv.c 		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
kvm              1325 arch/x86/kvm/hyperv.c 	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
kvm              1328 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm              1344 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1355 arch/x86/kvm/hyperv.c 	struct kvm *kvm = current_vcpu->kvm;
kvm              1368 arch/x86/kvm/hyperv.c 		if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
kvm              1387 arch/x86/kvm/hyperv.c 		if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
kvm              1408 arch/x86/kvm/hyperv.c 		    kvm_read_guest(kvm,
kvm              1419 arch/x86/kvm/hyperv.c 		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
kvm              1426 arch/x86/kvm/hyperv.c 	kvm_make_vcpus_request_mask(kvm,
kvm              1436 arch/x86/kvm/hyperv.c static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
kvm              1446 arch/x86/kvm/hyperv.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1458 arch/x86/kvm/hyperv.c 	struct kvm *kvm = current_vcpu->kvm;
kvm              1472 arch/x86/kvm/hyperv.c 			if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
kvm              1489 arch/x86/kvm/hyperv.c 		if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
kvm              1508 arch/x86/kvm/hyperv.c 		    kvm_read_guest(kvm,
kvm              1520 arch/x86/kvm/hyperv.c 		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
kvm              1523 arch/x86/kvm/hyperv.c 	kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
kvm              1529 arch/x86/kvm/hyperv.c bool kvm_hv_hypercall_enabled(struct kvm *kvm)
kvm              1531 arch/x86/kvm/hyperv.c 	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
kvm              1589 arch/x86/kvm/hyperv.c 	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
kvm              1718 arch/x86/kvm/hyperv.c void kvm_hv_init_vm(struct kvm *kvm)
kvm              1720 arch/x86/kvm/hyperv.c 	mutex_init(&kvm->arch.hyperv.hv_lock);
kvm              1721 arch/x86/kvm/hyperv.c 	idr_init(&kvm->arch.hyperv.conn_to_evt);
kvm              1724 arch/x86/kvm/hyperv.c void kvm_hv_destroy_vm(struct kvm *kvm)
kvm              1729 arch/x86/kvm/hyperv.c 	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
kvm              1731 arch/x86/kvm/hyperv.c 	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
kvm              1734 arch/x86/kvm/hyperv.c static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
kvm              1736 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm              1758 arch/x86/kvm/hyperv.c static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
kvm              1760 arch/x86/kvm/hyperv.c 	struct kvm_hv *hv = &kvm->arch.hyperv;
kvm              1770 arch/x86/kvm/hyperv.c 	synchronize_srcu(&kvm->srcu);
kvm              1775 arch/x86/kvm/hyperv.c int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
kvm              1782 arch/x86/kvm/hyperv.c 		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
kvm              1783 arch/x86/kvm/hyperv.c 	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
kvm                52 arch/x86/kvm/hyperv.h bool kvm_hv_hypercall_enabled(struct kvm *kvm);
kvm                55 arch/x86/kvm/hyperv.h void kvm_hv_irq_routing_update(struct kvm *kvm);
kvm                56 arch/x86/kvm/hyperv.h int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
kvm                91 arch/x86/kvm/hyperv.h void kvm_hv_setup_tsc_page(struct kvm *kvm,
kvm                94 arch/x86/kvm/hyperv.h void kvm_hv_init_vm(struct kvm *kvm);
kvm                95 arch/x86/kvm/hyperv.h void kvm_hv_destroy_vm(struct kvm *kvm);
kvm                96 arch/x86/kvm/hyperv.h int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
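
kvm_vm_ioctl_hv_eventfd() above backs the KVM_HYPERV_EVENTFD vm ioctl: it binds a Hyper-V connection id to an eventfd in an idr, and guest HvSignalEvent hypercalls look the id up (hyperv.c:1589 in the listing) and signal it. A hedged userspace sketch of assigning one; the connection id is a placeholder and error handling is minimal:

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: route Hyper-V connection id 1 to a fresh eventfd.
	 * vm_fd is an existing KVM VM file descriptor. Returns the
	 * eventfd on success, -1 on failure. */
	static int hv_eventfd_assign(int vm_fd)
	{
		struct kvm_hyperv_eventfd args = {
			.conn_id = 1,	/* placeholder connection id */
			.fd = eventfd(0, EFD_CLOEXEC),
		};

		if (args.fd < 0)
			return -1;
		if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &args) < 0)
			return -1;
		return args.fd;
	}
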
kvm               220 arch/x86/kvm/i8254.c 	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
kvm               242 arch/x86/kvm/i8254.c 	struct kvm *kvm = pit->kvm;
kvm               250 arch/x86/kvm/i8254.c 	kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
kvm               251 arch/x86/kvm/i8254.c 	kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);
kvm               262 arch/x86/kvm/i8254.c 	if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
kvm               263 arch/x86/kvm/i8254.c 		kvm_for_each_vcpu(i, vcpu, kvm)
kvm               293 arch/x86/kvm/i8254.c 	struct kvm *kvm = pit->kvm;
kvm               301 arch/x86/kvm/i8254.c 		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
kvm               302 arch/x86/kvm/i8254.c 		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm               304 arch/x86/kvm/i8254.c 		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
kvm               305 arch/x86/kvm/i8254.c 		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm               314 arch/x86/kvm/i8254.c 	struct kvm *kvm = pit->kvm;
kvm               317 arch/x86/kvm/i8254.c 	if (!ioapic_in_kernel(kvm) ||
kvm               648 arch/x86/kvm/i8254.c struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm               660 arch/x86/kvm/i8254.c 	pit->irq_source_id = kvm_request_irq_source_id(kvm);
kvm               676 arch/x86/kvm/i8254.c 	pit->kvm = kvm;
kvm               690 arch/x86/kvm/i8254.c 	mutex_lock(&kvm->slots_lock);
kvm               692 arch/x86/kvm/i8254.c 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
kvm               699 arch/x86/kvm/i8254.c 		ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
kvm               705 arch/x86/kvm/i8254.c 	mutex_unlock(&kvm->slots_lock);
kvm               710 arch/x86/kvm/i8254.c 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
kvm               712 arch/x86/kvm/i8254.c 	mutex_unlock(&kvm->slots_lock);
kvm               716 arch/x86/kvm/i8254.c 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
kvm               722 arch/x86/kvm/i8254.c void kvm_free_pit(struct kvm *kvm)
kvm               724 arch/x86/kvm/i8254.c 	struct kvm_pit *pit = kvm->arch.vpit;
kvm               727 arch/x86/kvm/i8254.c 		mutex_lock(&kvm->slots_lock);
kvm               728 arch/x86/kvm/i8254.c 		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
kvm               729 arch/x86/kvm/i8254.c 		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
kvm               730 arch/x86/kvm/i8254.c 		mutex_unlock(&kvm->slots_lock);
kvm               734 arch/x86/kvm/i8254.c 		kvm_free_irq_source_id(kvm, pit->irq_source_id);
kvm                44 arch/x86/kvm/i8254.h 	struct kvm *kvm;
kvm                59 arch/x86/kvm/i8254.h struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
kvm                60 arch/x86/kvm/i8254.h void kvm_free_pit(struct kvm *kvm);
kvm                40 arch/x86/kvm/i8259.c static void pic_irq_request(struct kvm *kvm, int level);
kvm                60 arch/x86/kvm/i8259.c 		kvm_for_each_vcpu(i, vcpu, s->kvm) {
kvm                82 arch/x86/kvm/i8259.c 	kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
kvm               176 arch/x86/kvm/i8259.c 	pic_irq_request(s->kvm, irq >= 0);
kvm               234 arch/x86/kvm/i8259.c int kvm_pic_read_irq(struct kvm *kvm)
kvm               237 arch/x86/kvm/i8259.c 	struct kvm_pic *s = kvm->arch.vpic;
kvm               290 arch/x86/kvm/i8259.c 	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
kvm               373 arch/x86/kvm/i8259.c 						s->pics_state->kvm,
kvm               561 arch/x86/kvm/i8259.c static void pic_irq_request(struct kvm *kvm, int level)
kvm               563 arch/x86/kvm/i8259.c 	struct kvm_pic *s = kvm->arch.vpic;
kvm               585 arch/x86/kvm/i8259.c int kvm_pic_init(struct kvm *kvm)
kvm               594 arch/x86/kvm/i8259.c 	s->kvm = kvm;
kvm               606 arch/x86/kvm/i8259.c 	mutex_lock(&kvm->slots_lock);
kvm               607 arch/x86/kvm/i8259.c 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2,
kvm               612 arch/x86/kvm/i8259.c 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave);
kvm               616 arch/x86/kvm/i8259.c 	ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr);
kvm               620 arch/x86/kvm/i8259.c 	mutex_unlock(&kvm->slots_lock);
kvm               622 arch/x86/kvm/i8259.c 	kvm->arch.vpic = s;
kvm               627 arch/x86/kvm/i8259.c 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave);
kvm               630 arch/x86/kvm/i8259.c 	kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master);
kvm               633 arch/x86/kvm/i8259.c 	mutex_unlock(&kvm->slots_lock);
kvm               640 arch/x86/kvm/i8259.c void kvm_pic_destroy(struct kvm *kvm)
kvm               642 arch/x86/kvm/i8259.c 	struct kvm_pic *vpic = kvm->arch.vpic;
kvm               647 arch/x86/kvm/i8259.c 	mutex_lock(&kvm->slots_lock);
kvm               648 arch/x86/kvm/i8259.c 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
kvm               649 arch/x86/kvm/i8259.c 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
kvm               650 arch/x86/kvm/i8259.c 	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
kvm               651 arch/x86/kvm/i8259.c 	mutex_unlock(&kvm->slots_lock);
kvm               653 arch/x86/kvm/i8259.c 	kvm->arch.vpic = NULL;
kvm               108 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
kvm               136 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
kvm               152 arch/x86/kvm/ioapic.c 	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
kvm               238 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
kvm               253 arch/x86/kvm/ioapic.c 		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
kvm               265 arch/x86/kvm/ioapic.c void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
kvm               267 arch/x86/kvm/ioapic.c 	if (!ioapic_in_kernel(kvm))
kvm               269 arch/x86/kvm/ioapic.c 	kvm_make_scan_ioapic_request(kvm);
kvm               323 arch/x86/kvm/ioapic.c 			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
kvm               327 arch/x86/kvm/ioapic.c 		kvm_make_scan_ioapic_request(ioapic->kvm);
kvm               363 arch/x86/kvm/ioapic.c 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
kvm               367 arch/x86/kvm/ioapic.c 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
kvm               449 arch/x86/kvm/ioapic.c 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
kvm               483 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
kvm               608 arch/x86/kvm/ioapic.c int kvm_ioapic_init(struct kvm *kvm)
kvm               618 arch/x86/kvm/ioapic.c 	kvm->arch.vioapic = ioapic;
kvm               621 arch/x86/kvm/ioapic.c 	ioapic->kvm = kvm;
kvm               622 arch/x86/kvm/ioapic.c 	mutex_lock(&kvm->slots_lock);
kvm               623 arch/x86/kvm/ioapic.c 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
kvm               625 arch/x86/kvm/ioapic.c 	mutex_unlock(&kvm->slots_lock);
kvm               627 arch/x86/kvm/ioapic.c 		kvm->arch.vioapic = NULL;
kvm               634 arch/x86/kvm/ioapic.c void kvm_ioapic_destroy(struct kvm *kvm)
kvm               636 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
kvm               642 arch/x86/kvm/ioapic.c 	mutex_lock(&kvm->slots_lock);
kvm               643 arch/x86/kvm/ioapic.c 	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
kvm               644 arch/x86/kvm/ioapic.c 	mutex_unlock(&kvm->slots_lock);
kvm               645 arch/x86/kvm/ioapic.c 	kvm->arch.vioapic = NULL;
kvm               649 arch/x86/kvm/ioapic.c void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
kvm               651 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
kvm               659 arch/x86/kvm/ioapic.c void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
kvm               661 arch/x86/kvm/ioapic.c 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
kvm               667 arch/x86/kvm/ioapic.c 	kvm_make_scan_ioapic_request(kvm);
kvm                 9 arch/x86/kvm/ioapic.h struct kvm;
kvm                87 arch/x86/kvm/ioapic.h 	struct kvm *kvm;
kvm               109 arch/x86/kvm/ioapic.h static inline int ioapic_in_kernel(struct kvm *kvm)
kvm               111 arch/x86/kvm/ioapic.h 	int mode = kvm->arch.irqchip_mode;
kvm               124 arch/x86/kvm/ioapic.h int kvm_ioapic_init(struct kvm *kvm);
kvm               125 arch/x86/kvm/ioapic.h void kvm_ioapic_destroy(struct kvm *kvm);
kvm               129 arch/x86/kvm/ioapic.h int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
kvm               132 arch/x86/kvm/ioapic.h void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
kvm               133 arch/x86/kvm/ioapic.h void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
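
The PIT, PIC, and IOAPIC models above all attach themselves to a KVM I/O bus the same way: take kvm->slots_lock, kvm_io_bus_register_dev() at their fixed address, unlock, and unregister under the same lock on teardown. A minimal sketch of the registration half; the device is assumed to be already initialized with kvm_iodevice_init():

	/* Sketch: register a device model on the MMIO bus, as the
	 * PIT/PIC/IOAPIC init paths above do. */
	static int attach_mmio_dev(struct kvm *kvm, struct kvm_io_device *dev,
				   gpa_t base, int len)
	{
		int ret;

		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len, dev);
		mutex_unlock(&kvm->slots_lock);
		return ret;
	}
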
kvm                48 arch/x86/kvm/irq.c 		if (irqchip_split(v->kvm))
kvm                51 arch/x86/kvm/irq.c 			return v->kvm->arch.vpic->output;
kvm               121 arch/x86/kvm/irq.c 		if (irqchip_split(v->kvm)) {
kvm               127 arch/x86/kvm/irq.c 			return kvm_pic_read_irq(v->kvm); /* PIC */
kvm               164 arch/x86/kvm/irq.c bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
kvm               168 arch/x86/kvm/irq.c 	return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
kvm                26 arch/x86/kvm/irq.h struct kvm;
kvm                54 arch/x86/kvm/irq.h 	struct kvm *kvm;
kvm                64 arch/x86/kvm/irq.h int kvm_pic_init(struct kvm *kvm);
kvm                65 arch/x86/kvm/irq.h void kvm_pic_destroy(struct kvm *kvm);
kvm                66 arch/x86/kvm/irq.h int kvm_pic_read_irq(struct kvm *kvm);
kvm                69 arch/x86/kvm/irq.h static inline int pic_in_kernel(struct kvm *kvm)
kvm                71 arch/x86/kvm/irq.h 	int mode = kvm->arch.irqchip_mode;
kvm                78 arch/x86/kvm/irq.h static inline int irqchip_split(struct kvm *kvm)
kvm                80 arch/x86/kvm/irq.h 	int mode = kvm->arch.irqchip_mode;
kvm                87 arch/x86/kvm/irq.h static inline int irqchip_kernel(struct kvm *kvm)
kvm                89 arch/x86/kvm/irq.h 	int mode = kvm->arch.irqchip_mode;
kvm                96 arch/x86/kvm/irq.h static inline int irqchip_in_kernel(struct kvm *kvm)
kvm                98 arch/x86/kvm/irq.h 	int mode = kvm->arch.irqchip_mode;
kvm               114 arch/x86/kvm/irq.h int kvm_setup_default_irq_routing(struct kvm *kvm);
kvm               115 arch/x86/kvm/irq.h int kvm_setup_empty_irq_routing(struct kvm *kvm);
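
The irq.h helpers above each read kvm->arch.irqchip_mode into a local and classify it; the listing truncates the comparison lines. The layout is three-valued: no in-kernel irqchip, a full kernel PIC/IOAPIC, or split mode with only the LAPIC in kernel. A sketch reconstructing one helper; the barrier pairing with the mode setter is an assumption of this generation's style, not shown in the listing:

	/* Sketch of the irqchip-mode classification pattern. */
	static inline int my_irqchip_split(struct kvm *kvm)
	{
		int mode = kvm->arch.irqchip_mode;

		smp_rmb();	/* assumed to pair with smp_wmb() at mode set */
		return mode == KVM_IRQCHIP_SPLIT;
	}
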
kvm                31 arch/x86/kvm/irq_comm.c 			   struct kvm *kvm, int irq_source_id, int level,
kvm                34 arch/x86/kvm/irq_comm.c 	struct kvm_pic *pic = kvm->arch.vpic;
kvm                39 arch/x86/kvm/irq_comm.c 			      struct kvm *kvm, int irq_source_id, int level,
kvm                42 arch/x86/kvm/irq_comm.c 	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
kvm                47 arch/x86/kvm/irq_comm.c int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
kvm                61 arch/x86/kvm/irq_comm.c 	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
kvm                66 arch/x86/kvm/irq_comm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm                95 arch/x86/kvm/irq_comm.c 		lowest = kvm_get_vcpu(kvm, idx);
kvm               104 arch/x86/kvm/irq_comm.c void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
kvm               107 arch/x86/kvm/irq_comm.c 	trace_kvm_msi_set_irq(e->msi.address_lo | (kvm->arch.x2apic_format ?
kvm               113 arch/x86/kvm/irq_comm.c 	if (kvm->arch.x2apic_format)
kvm               127 arch/x86/kvm/irq_comm.c static inline bool kvm_msi_route_invalid(struct kvm *kvm,
kvm               130 arch/x86/kvm/irq_comm.c 	return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
kvm               134 arch/x86/kvm/irq_comm.c 		struct kvm *kvm, int irq_source_id, int level, bool line_status)
kvm               138 arch/x86/kvm/irq_comm.c 	if (kvm_msi_route_invalid(kvm, e))
kvm               144 arch/x86/kvm/irq_comm.c 	kvm_set_msi_irq(kvm, e, &irq);
kvm               146 arch/x86/kvm/irq_comm.c 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
kvm               151 arch/x86/kvm/irq_comm.c 		    struct kvm *kvm, int irq_source_id, int level,
kvm               157 arch/x86/kvm/irq_comm.c 	return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
kvm               161 arch/x86/kvm/irq_comm.c 			      struct kvm *kvm, int irq_source_id, int level,
kvm               169 arch/x86/kvm/irq_comm.c 		return kvm_hv_set_sint(e, kvm, irq_source_id, level,
kvm               173 arch/x86/kvm/irq_comm.c 		if (kvm_msi_route_invalid(kvm, e))
kvm               176 arch/x86/kvm/irq_comm.c 		kvm_set_msi_irq(kvm, e, &irq);
kvm               178 arch/x86/kvm/irq_comm.c 		if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
kvm               189 arch/x86/kvm/irq_comm.c int kvm_request_irq_source_id(struct kvm *kvm)
kvm               191 arch/x86/kvm/irq_comm.c 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
kvm               194 arch/x86/kvm/irq_comm.c 	mutex_lock(&kvm->irq_lock);
kvm               207 arch/x86/kvm/irq_comm.c 	mutex_unlock(&kvm->irq_lock);
kvm               212 arch/x86/kvm/irq_comm.c void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
kvm               217 arch/x86/kvm/irq_comm.c 	mutex_lock(&kvm->irq_lock);
kvm               223 arch/x86/kvm/irq_comm.c 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
kvm               224 arch/x86/kvm/irq_comm.c 	if (!irqchip_kernel(kvm))
kvm               227 arch/x86/kvm/irq_comm.c 	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
kvm               228 arch/x86/kvm/irq_comm.c 	kvm_pic_clear_all(kvm->arch.vpic, irq_source_id);
kvm               230 arch/x86/kvm/irq_comm.c 	mutex_unlock(&kvm->irq_lock);
kvm               233 arch/x86/kvm/irq_comm.c void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
kvm               236 arch/x86/kvm/irq_comm.c 	mutex_lock(&kvm->irq_lock);
kvm               238 arch/x86/kvm/irq_comm.c 	hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list);
kvm               239 arch/x86/kvm/irq_comm.c 	mutex_unlock(&kvm->irq_lock);
kvm               242 arch/x86/kvm/irq_comm.c void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
kvm               245 arch/x86/kvm/irq_comm.c 	mutex_lock(&kvm->irq_lock);
kvm               247 arch/x86/kvm/irq_comm.c 	mutex_unlock(&kvm->irq_lock);
kvm               248 arch/x86/kvm/irq_comm.c 	synchronize_srcu(&kvm->irq_srcu);
kvm               251 arch/x86/kvm/irq_comm.c void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
kvm               257 arch/x86/kvm/irq_comm.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               258 arch/x86/kvm/irq_comm.c 	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
kvm               260 arch/x86/kvm/irq_comm.c 		hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link)
kvm               263 arch/x86/kvm/irq_comm.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               266 arch/x86/kvm/irq_comm.c bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
kvm               268 arch/x86/kvm/irq_comm.c 	return irqchip_in_kernel(kvm);
kvm               271 arch/x86/kvm/irq_comm.c int kvm_set_routing_entry(struct kvm *kvm,
kvm               281 arch/x86/kvm/irq_comm.c 		if (irqchip_split(kvm))
kvm               309 arch/x86/kvm/irq_comm.c 		if (kvm_msi_route_invalid(kvm, e))
kvm               324 arch/x86/kvm/irq_comm.c bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
kvm               330 arch/x86/kvm/irq_comm.c 	if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
kvm               333 arch/x86/kvm/irq_comm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               377 arch/x86/kvm/irq_comm.c int kvm_setup_default_irq_routing(struct kvm *kvm)
kvm               379 arch/x86/kvm/irq_comm.c 	return kvm_set_irq_routing(kvm, default_routing,
kvm               385 arch/x86/kvm/irq_comm.c int kvm_setup_empty_irq_routing(struct kvm *kvm)
kvm               387 arch/x86/kvm/irq_comm.c 	return kvm_set_irq_routing(kvm, empty_routing, 0, 0);
kvm               390 arch/x86/kvm/irq_comm.c void kvm_arch_post_irq_routing_update(struct kvm *kvm)
kvm               392 arch/x86/kvm/irq_comm.c 	if (!irqchip_split(kvm))
kvm               394 arch/x86/kvm/irq_comm.c 	kvm_make_scan_ioapic_request(kvm);
kvm               400 arch/x86/kvm/irq_comm.c 	struct kvm *kvm = vcpu->kvm;
kvm               406 arch/x86/kvm/irq_comm.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               407 arch/x86/kvm/irq_comm.c 	table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
kvm               409 arch/x86/kvm/irq_comm.c 			       kvm->arch.nr_reserved_ioapic_pins);
kvm               417 arch/x86/kvm/irq_comm.c 			kvm_set_msi_irq(vcpu->kvm, entry, &irq);
kvm               424 arch/x86/kvm/irq_comm.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               427 arch/x86/kvm/irq_comm.c void kvm_arch_irq_routing_update(struct kvm *kvm)
kvm               429 arch/x86/kvm/irq_comm.c 	kvm_hv_irq_routing_update(kvm);
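
kvm_scan_ioapic_routes() above (irq_comm.c:400-424 in the listing) is the canonical reader of the per-VM routing table: enter kvm->irq_srcu, srcu_dereference() the table, walk it, leave; writers publish a new table and synchronize_srcu() before freeing the old one. A reader sketch with a hypothetical per-entry callback; the table internals (nr_rt_entries, map[]) are assumed from this kernel generation:

	/* Sketch: SRCU-protected walk of the per-VM IRQ routing table. */
	static void walk_msi_routes(struct kvm *kvm,
				    void (*fn)(struct kvm_kernel_irq_routing_entry *e))
	{
		struct kvm_irq_routing_table *table;
		struct kvm_kernel_irq_routing_entry *entry;
		int idx;
		u32 gsi;

		idx = srcu_read_lock(&kvm->irq_srcu);
		table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
		for (gsi = 0; gsi < table->nr_rt_entries; gsi++)
			hlist_for_each_entry(entry, &table->map[gsi], link)
				if (entry->type == KVM_IRQ_ROUTING_MSI)
					fn(entry);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}
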
kvm               170 arch/x86/kvm/lapic.c static void recalculate_apic_map(struct kvm *kvm)
kvm               177 arch/x86/kvm/lapic.c 	mutex_lock(&kvm->arch.apic_map_lock);
kvm               179 arch/x86/kvm/lapic.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               192 arch/x86/kvm/lapic.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               239 arch/x86/kvm/lapic.c 	old = rcu_dereference_protected(kvm->arch.apic_map,
kvm               240 arch/x86/kvm/lapic.c 			lockdep_is_held(&kvm->arch.apic_map_lock));
kvm               241 arch/x86/kvm/lapic.c 	rcu_assign_pointer(kvm->arch.apic_map, new);
kvm               242 arch/x86/kvm/lapic.c 	mutex_unlock(&kvm->arch.apic_map_lock);
kvm               247 arch/x86/kvm/lapic.c 	kvm_make_scan_ioapic_request(kvm);
kvm               263 arch/x86/kvm/lapic.c 		recalculate_apic_map(apic->vcpu->kvm);
kvm               270 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
kvm               276 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
kvm               292 arch/x86/kvm/lapic.c 	recalculate_apic_map(apic->vcpu->kvm);
kvm               343 arch/x86/kvm/lapic.c 	    !ioapic_in_kernel(vcpu->kvm))
kvm               560 arch/x86/kvm/lapic.c int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
kvm               582 arch/x86/kvm/lapic.c 	map = rcu_dereference(kvm->arch.apic_map);
kvm               621 arch/x86/kvm/lapic.c 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
kvm               628 arch/x86/kvm/lapic.c 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
kvm               796 arch/x86/kvm/lapic.c 	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
kvm               844 arch/x86/kvm/lapic.c static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
kvm               846 arch/x86/kvm/lapic.c 	if (!kvm->arch.disabled_lapic_found) {
kvm               847 arch/x86/kvm/lapic.c 		kvm->arch.disabled_lapic_found = true;
kvm               853 arch/x86/kvm/lapic.c static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
kvm               856 arch/x86/kvm/lapic.c 	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
kvm               879 arch/x86/kvm/lapic.c static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
kvm               893 arch/x86/kvm/lapic.c 	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
kvm               934 arch/x86/kvm/lapic.c 			kvm_apic_disabled_lapic_found(kvm);
kvm               945 arch/x86/kvm/lapic.c bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
kvm               962 arch/x86/kvm/lapic.c 	map = rcu_dereference(kvm->arch.apic_map);
kvm               964 arch/x86/kvm/lapic.c 	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
kvm               992 arch/x86/kvm/lapic.c bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
kvm              1004 arch/x86/kvm/lapic.c 	map = rcu_dereference(kvm->arch.apic_map);
kvm              1006 arch/x86/kvm/lapic.c 	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
kvm              1145 arch/x86/kvm/lapic.c 	if (irqchip_split(apic->vcpu->kvm)) {
kvm              1216 arch/x86/kvm/lapic.c 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
kvm              1373 arch/x86/kvm/lapic.c 		if (!kvm_check_has_quirk(vcpu->kvm,
kvm              1844 arch/x86/kvm/lapic.c 			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
kvm              1846 arch/x86/kvm/lapic.c 			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
kvm              1883 arch/x86/kvm/lapic.c 			recalculate_apic_map(apic->vcpu->kvm);
kvm              2004 arch/x86/kvm/lapic.c 		if (!kvm_check_has_quirk(vcpu->kvm,
kvm              2137 arch/x86/kvm/lapic.c 			recalculate_apic_map(vcpu->kvm);
kvm              2177 arch/x86/kvm/lapic.c 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
kvm              2408 arch/x86/kvm/lapic.c 		if (vcpu->kvm->arch.x2apic_format) {
kvm              2447 arch/x86/kvm/lapic.c 	recalculate_apic_map(vcpu->kvm);
kvm              2468 arch/x86/kvm/lapic.c 	if (ioapic_in_kernel(vcpu->kvm))
kvm              2536 arch/x86/kvm/lapic.c 	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
kvm              2589 arch/x86/kvm/lapic.c 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
kvm              2596 arch/x86/kvm/lapic.c 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
kvm              2696 arch/x86/kvm/lapic.c 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
kvm                94 arch/x86/kvm/lapic.h bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
kvm               229 arch/x86/kvm/lapic.h bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
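
recalculate_apic_map() above (lapic.c:170-247 in the listing) publishes the optimized APIC lookup map with the classic RCU swap: build the new map, fetch the old pointer with rcu_dereference_protected() under apic_map_lock (the mutex justifies the lockless annotation), rcu_assign_pointer() the new one in, and free the old map only after a grace period, since readers such as kvm_irq_delivery_to_apic_fast() hold only rcu_read_lock(). A publish-side sketch; the rcu_head member name is assumed:

	static void apic_map_free(struct rcu_head *head)
	{
		kvfree(container_of(head, struct kvm_apic_map, rcu));
	}

	/* Sketch: swap in a freshly built map; new_map may be NULL. */
	static void publish_apic_map(struct kvm *kvm, struct kvm_apic_map *new_map)
	{
		struct kvm_apic_map *old;

		mutex_lock(&kvm->arch.apic_map_lock);
		old = rcu_dereference_protected(kvm->arch.apic_map,
				lockdep_is_held(&kvm->arch.apic_map_lock));
		rcu_assign_pointer(kvm->arch.apic_map, new_map);
		mutex_unlock(&kvm->arch.apic_map_lock);

		if (old)
			call_rcu(&old->rcu, apic_map_free);
	}
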
kvm               319 arch/x86/kvm/mmu.c static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
kvm               325 arch/x86/kvm/mmu.c 		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
kvm               328 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs(kvm);
kvm               331 arch/x86/kvm/mmu.c static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
kvm               339 arch/x86/kvm/mmu.c 	kvm_flush_remote_tlbs_with_range(kvm, &range);
kvm              1214 arch/x86/kvm/mmu.c static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              1220 arch/x86/kvm/mmu.c 	kvm->arch.indirect_shadow_pages++;
kvm              1222 arch/x86/kvm/mmu.c 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
kvm              1227 arch/x86/kvm/mmu.c 		return kvm_slot_page_track_add_page(kvm, slot, gfn,
kvm              1233 arch/x86/kvm/mmu.c static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              1238 arch/x86/kvm/mmu.c 	++kvm->stat.nx_lpage_splits;
kvm              1240 arch/x86/kvm/mmu.c 		      &kvm->arch.lpage_disallowed_mmu_pages);
kvm              1244 arch/x86/kvm/mmu.c static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              1250 arch/x86/kvm/mmu.c 	kvm->arch.indirect_shadow_pages--;
kvm              1252 arch/x86/kvm/mmu.c 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
kvm              1255 arch/x86/kvm/mmu.c 		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
kvm              1261 arch/x86/kvm/mmu.c static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              1263 arch/x86/kvm/mmu.c 	--kvm->stat.nx_lpage_splits;
kvm              1478 arch/x86/kvm/mmu.c static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
kvm              1484 arch/x86/kvm/mmu.c 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
kvm              1504 arch/x86/kvm/mmu.c 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
kvm              1508 arch/x86/kvm/mmu.c static void rmap_remove(struct kvm *kvm, u64 *spte)
kvm              1516 arch/x86/kvm/mmu.c 	rmap_head = gfn_to_rmap(kvm, gfn, sp);
kvm              1596 arch/x86/kvm/mmu.c static void drop_spte(struct kvm *kvm, u64 *sptep)
kvm              1599 arch/x86/kvm/mmu.c 		rmap_remove(kvm, sptep);
kvm              1603 arch/x86/kvm/mmu.c static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
kvm              1608 arch/x86/kvm/mmu.c 		drop_spte(kvm, sptep);
kvm              1609 arch/x86/kvm/mmu.c 		--kvm->stat.lpages;
kvm              1618 arch/x86/kvm/mmu.c 	if (__drop_large_spte(vcpu->kvm, sptep)) {
kvm              1621 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
kvm              1656 arch/x86/kvm/mmu.c static bool __rmap_write_protect(struct kvm *kvm,
kvm              1697 arch/x86/kvm/mmu.c static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
kvm              1728 arch/x86/kvm/mmu.c static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
kvm              1751 arch/x86/kvm/mmu.c static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
kvm              1760 arch/x86/kvm/mmu.c 		__rmap_write_protect(kvm, rmap_head, false);
kvm              1777 arch/x86/kvm/mmu.c void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
kvm              1786 arch/x86/kvm/mmu.c 		__rmap_clear_dirty(kvm, rmap_head);
kvm              1804 arch/x86/kvm/mmu.c void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm              1809 arch/x86/kvm/mmu.c 		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
kvm              1812 arch/x86/kvm/mmu.c 		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
kvm              1830 arch/x86/kvm/mmu.c bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
kvm              1839 arch/x86/kvm/mmu.c 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
kvm              1850 arch/x86/kvm/mmu.c 	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
kvm              1853 arch/x86/kvm/mmu.c static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
kvm              1869 arch/x86/kvm/mmu.c static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
kvm              1873 arch/x86/kvm/mmu.c 	return kvm_zap_rmapp(kvm, rmap_head);
kvm              1876 arch/x86/kvm/mmu.c static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
kvm              1915 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
kvm              1990 arch/x86/kvm/mmu.c static int kvm_handle_hva_range(struct kvm *kvm,
kvm              1994 arch/x86/kvm/mmu.c 				int (*handler)(struct kvm *kvm,
kvm              2008 arch/x86/kvm/mmu.c 		slots = __kvm_memslots(kvm, i);
kvm              2029 arch/x86/kvm/mmu.c 				ret |= handler(kvm, iterator.rmap, memslot,
kvm              2037 arch/x86/kvm/mmu.c static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
kvm              2039 arch/x86/kvm/mmu.c 			  int (*handler)(struct kvm *kvm,
kvm              2045 arch/x86/kvm/mmu.c 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
kvm              2048 arch/x86/kvm/mmu.c int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
kvm              2050 arch/x86/kvm/mmu.c 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
kvm              2053 arch/x86/kvm/mmu.c int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm              2055 arch/x86/kvm/mmu.c 	return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
kvm              2058 arch/x86/kvm/mmu.c static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
kvm              2073 arch/x86/kvm/mmu.c static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
kvm              2095 arch/x86/kvm/mmu.c 	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
kvm              2097 arch/x86/kvm/mmu.c 	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
kvm              2098 arch/x86/kvm/mmu.c 	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
kvm              2102 arch/x86/kvm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
kvm              2104 arch/x86/kvm/mmu.c 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
kvm              2107 arch/x86/kvm/mmu.c int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
kvm              2109 arch/x86/kvm/mmu.c 	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
kvm              2134 arch/x86/kvm/mmu.c static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
kvm              2136 arch/x86/kvm/mmu.c 	kvm->arch.n_used_mmu_pages += nr;
kvm              2193 arch/x86/kvm/mmu.c 	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
kvm              2194 arch/x86/kvm/mmu.c 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm              2195 arch/x86/kvm/mmu.c 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
kvm              2326 arch/x86/kvm/mmu.c static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              2331 arch/x86/kvm/mmu.c 	--kvm->stat.mmu_unsync;
kvm              2334 arch/x86/kvm/mmu.c static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm              2336 arch/x86/kvm/mmu.c static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm              2361 arch/x86/kvm/mmu.c 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
kvm              2368 arch/x86/kvm/mmu.c static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
kvm              2376 arch/x86/kvm/mmu.c 		kvm_mmu_commit_zap_page(kvm, invalid_list);
kvm              2378 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs(kvm);
kvm              2386 arch/x86/kvm/mmu.c 	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
kvm              2400 arch/x86/kvm/mmu.c static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              2403 arch/x86/kvm/mmu.c 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
kvm              2409 arch/x86/kvm/mmu.c 	kvm_unlink_unsync_page(vcpu->kvm, sp);
kvm              2420 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
kvm              2520 arch/x86/kvm/mmu.c 			kvm_flush_remote_tlbs(vcpu->kvm);
kvm              2528 arch/x86/kvm/mmu.c 		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
kvm              2530 arch/x86/kvm/mmu.c 			cond_resched_lock(&vcpu->kvm->mmu_lock);
kvm              2577 arch/x86/kvm/mmu.c 	for_each_valid_sp(vcpu->kvm, sp, gfn) {
kvm              2608 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_cache_miss;
kvm              2615 arch/x86/kvm/mmu.c 		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
kvm              2622 arch/x86/kvm/mmu.c 		account_shadowed(vcpu->kvm, sp);
kvm              2625 arch/x86/kvm/mmu.c 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
kvm              2635 arch/x86/kvm/mmu.c 	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
kvm              2636 arch/x86/kvm/mmu.c 		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
kvm              2744 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
kvm              2748 arch/x86/kvm/mmu.c static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm              2757 arch/x86/kvm/mmu.c 			drop_spte(kvm, spte);
kvm              2759 arch/x86/kvm/mmu.c 				--kvm->stat.lpages;
kvm              2773 arch/x86/kvm/mmu.c static void kvm_mmu_page_unlink_children(struct kvm *kvm,
kvm              2779 arch/x86/kvm/mmu.c 		mmu_page_zap_pte(kvm, sp, sp->spt + i);
kvm              2782 arch/x86/kvm/mmu.c static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm              2791 arch/x86/kvm/mmu.c static int mmu_zap_unsync_children(struct kvm *kvm,
kvm              2806 arch/x86/kvm/mmu.c 			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
kvm              2815 arch/x86/kvm/mmu.c static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
kvm              2823 arch/x86/kvm/mmu.c 	++kvm->stat.mmu_shadow_zapped;
kvm              2824 arch/x86/kvm/mmu.c 	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
kvm              2825 arch/x86/kvm/mmu.c 	kvm_mmu_page_unlink_children(kvm, sp);
kvm              2826 arch/x86/kvm/mmu.c 	kvm_mmu_unlink_parents(kvm, sp);
kvm              2832 arch/x86/kvm/mmu.c 		unaccount_shadowed(kvm, sp);
kvm              2835 arch/x86/kvm/mmu.c 		kvm_unlink_unsync_page(kvm, sp);
kvm              2840 arch/x86/kvm/mmu.c 		kvm_mod_used_mmu_pages(kvm, -1);
kvm              2842 arch/x86/kvm/mmu.c 		list_move(&sp->link, &kvm->arch.active_mmu_pages);
kvm              2849 arch/x86/kvm/mmu.c 		if (!is_obsolete_sp(kvm, sp))
kvm              2850 arch/x86/kvm/mmu.c 			kvm_reload_remote_mmus(kvm);
kvm              2854 arch/x86/kvm/mmu.c 		unaccount_huge_nx_page(kvm, sp);
kvm              2860 arch/x86/kvm/mmu.c static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
kvm              2865 arch/x86/kvm/mmu.c 	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
kvm              2869 arch/x86/kvm/mmu.c static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm              2886 arch/x86/kvm/mmu.c 	kvm_flush_remote_tlbs(kvm);
kvm              2894 arch/x86/kvm/mmu.c static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
kvm              2899 arch/x86/kvm/mmu.c 	if (list_empty(&kvm->arch.active_mmu_pages))
kvm              2902 arch/x86/kvm/mmu.c 	sp = list_last_entry(&kvm->arch.active_mmu_pages,
kvm              2904 arch/x86/kvm/mmu.c 	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
kvm              2911 arch/x86/kvm/mmu.c void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
kvm              2915 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              2917 arch/x86/kvm/mmu.c 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
kvm              2919 arch/x86/kvm/mmu.c 		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
kvm              2920 arch/x86/kvm/mmu.c 			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
kvm              2923 arch/x86/kvm/mmu.c 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
kvm              2924 arch/x86/kvm/mmu.c 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
kvm              2927 arch/x86/kvm/mmu.c 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
kvm              2929 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              2932 arch/x86/kvm/mmu.c int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
kvm              2940 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              2941 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
kvm              2945 arch/x86/kvm/mmu.c 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
kvm              2947 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
kvm              2948 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              2957 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_unsync;
kvm              2971 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
kvm              3182 arch/x86/kvm/mmu.c 			drop_spte(vcpu->kvm, sptep);
kvm              3197 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
kvm              3206 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.lpages;
kvm              3357 arch/x86/kvm/mmu.c 				account_huge_nx_page(vcpu->kvm, sp);
kvm              3675 arch/x86/kvm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm              3685 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              3686 arch/x86/kvm/mmu.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
kvm              3695 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3700 arch/x86/kvm/mmu.c static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
kvm              3711 arch/x86/kvm/mmu.c 		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
kvm              3737 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              3741 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
kvm              3747 arch/x86/kvm/mmu.c 			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
kvm              3752 arch/x86/kvm/mmu.c 					mmu_free_root_page(vcpu->kvm,
kvm              3760 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
kvm              3761 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3769 arch/x86/kvm/mmu.c 	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
kvm              3783 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
kvm              3785 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3791 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3798 arch/x86/kvm/mmu.c 			spin_lock(&vcpu->kvm->mmu_lock);
kvm              3800 arch/x86/kvm/mmu.c 				spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3807 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3840 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
kvm              3842 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3849 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3877 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
kvm              3879 arch/x86/kvm/mmu.c 			spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3886 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3962 arch/x86/kvm/mmu.c 		spin_lock(&vcpu->kvm->mmu_lock);
kvm              3968 arch/x86/kvm/mmu.c 		spin_unlock(&vcpu->kvm->mmu_lock);
kvm              3972 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              3986 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              4205 arch/x86/kvm/mmu.c 	if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
kvm              4315 arch/x86/kvm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm              4325 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              4326 arch/x86/kvm/mmu.c 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
kvm              4335 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              5308 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.mmu_pde_zapped;
kvm              5312 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_pte_updated;
kvm              5446 arch/x86/kvm/mmu.c 	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
kvm              5460 arch/x86/kvm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              5464 arch/x86/kvm/mmu.c 	++vcpu->kvm->stat.mmu_pte_write;
kvm              5467 arch/x86/kvm/mmu.c 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
kvm              5470 arch/x86/kvm/mmu.c 			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
kvm              5471 arch/x86/kvm/mmu.c 			++vcpu->kvm->stat.mmu_flooded;
kvm              5484 arch/x86/kvm/mmu.c 			mmu_page_zap_pte(vcpu->kvm, sp, spte);
kvm              5496 arch/x86/kvm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              5509 arch/x86/kvm/mmu.c 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
kvm              5519 arch/x86/kvm/mmu.c 	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
kvm              5522 arch/x86/kvm/mmu.c 	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
kvm              5523 arch/x86/kvm/mmu.c 		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
kvm              5526 arch/x86/kvm/mmu.c 		++vcpu->kvm->stat.mmu_recycled;
kvm              5528 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
kvm              5530 arch/x86/kvm/mmu.c 	if (!kvm_mmu_available_pages(vcpu->kvm))
kvm              5575 arch/x86/kvm/mmu.c 		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
kvm              5687 arch/x86/kvm/mmu.c typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
kvm              5691 arch/x86/kvm/mmu.c slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              5701 arch/x86/kvm/mmu.c 			flush |= fn(kvm, iterator.rmap);
kvm              5703 arch/x86/kvm/mmu.c 		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm              5705 arch/x86/kvm/mmu.c 				kvm_flush_remote_tlbs_with_address(kvm,
kvm              5710 arch/x86/kvm/mmu.c 			cond_resched_lock(&kvm->mmu_lock);
kvm              5715 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
kvm              5724 arch/x86/kvm/mmu.c slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              5728 arch/x86/kvm/mmu.c 	return slot_handle_level_range(kvm, memslot, fn, start_level,
kvm              5735 arch/x86/kvm/mmu.c slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              5738 arch/x86/kvm/mmu.c 	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
kvm              5743 arch/x86/kvm/mmu.c slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              5746 arch/x86/kvm/mmu.c 	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
kvm              5751 arch/x86/kvm/mmu.c slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvm              5754 arch/x86/kvm/mmu.c 	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
kvm              5829 arch/x86/kvm/mmu.c static void kvm_zap_obsolete_pages(struct kvm *kvm)
kvm              5836 arch/x86/kvm/mmu.c 	      &kvm->arch.active_mmu_pages, link) {
kvm              5841 arch/x86/kvm/mmu.c 		if (!is_obsolete_sp(kvm, sp))
kvm              5860 arch/x86/kvm/mmu.c 		    cond_resched_lock(&kvm->mmu_lock)) {
kvm              5865 arch/x86/kvm/mmu.c 		if (__kvm_mmu_prepare_zap_page(kvm, sp,
kvm              5866 arch/x86/kvm/mmu.c 				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
kvm              5877 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
kvm              5889 arch/x86/kvm/mmu.c static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm              5891 arch/x86/kvm/mmu.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              5893 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              5894 arch/x86/kvm/mmu.c 	trace_kvm_mmu_zap_all_fast(kvm);
kvm              5903 arch/x86/kvm/mmu.c 	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
kvm              5913 arch/x86/kvm/mmu.c 	kvm_reload_remote_mmus(kvm);
kvm              5915 arch/x86/kvm/mmu.c 	kvm_zap_obsolete_pages(kvm);
kvm              5916 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              5919 arch/x86/kvm/mmu.c static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
kvm              5921 arch/x86/kvm/mmu.c 	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
kvm              5924 arch/x86/kvm/mmu.c static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
kvm              5928 arch/x86/kvm/mmu.c 	kvm_mmu_zap_all_fast(kvm);
kvm              5931 arch/x86/kvm/mmu.c void kvm_mmu_init_vm(struct kvm *kvm)
kvm              5933 arch/x86/kvm/mmu.c 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
kvm              5937 arch/x86/kvm/mmu.c 	kvm_page_track_register_notifier(kvm, node);
kvm              5940 arch/x86/kvm/mmu.c void kvm_mmu_uninit_vm(struct kvm *kvm)
kvm              5942 arch/x86/kvm/mmu.c 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
kvm              5944 arch/x86/kvm/mmu.c 	kvm_page_track_unregister_notifier(kvm, node);
kvm              5947 arch/x86/kvm/mmu.c void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
kvm              5953 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              5955 arch/x86/kvm/mmu.c 		slots = __kvm_memslots(kvm, i);
kvm              5964 arch/x86/kvm/mmu.c 			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
kvm              5970 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              5973 arch/x86/kvm/mmu.c static bool slot_rmap_write_protect(struct kvm *kvm,
kvm              5976 arch/x86/kvm/mmu.c 	return __rmap_write_protect(kvm, rmap_head, false);
kvm              5979 arch/x86/kvm/mmu.c void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
kvm              5984 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              5985 arch/x86/kvm/mmu.c 	flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
kvm              5987 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              5994 arch/x86/kvm/mmu.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              6008 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
kvm              6012 arch/x86/kvm/mmu.c static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
kvm              6039 arch/x86/kvm/mmu.c 				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
kvm              6051 arch/x86/kvm/mmu.c void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
kvm              6055 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6056 arch/x86/kvm/mmu.c 	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
kvm              6058 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6061 arch/x86/kvm/mmu.c void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
kvm              6066 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6067 arch/x86/kvm/mmu.c 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
kvm              6068 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6070 arch/x86/kvm/mmu.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              6079 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
kvm              6084 arch/x86/kvm/mmu.c void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
kvm              6089 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6090 arch/x86/kvm/mmu.c 	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
kvm              6092 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6095 arch/x86/kvm/mmu.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              6098 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
kvm              6103 arch/x86/kvm/mmu.c void kvm_mmu_slot_set_dirty(struct kvm *kvm,
kvm              6108 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6109 arch/x86/kvm/mmu.c 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
kvm              6110 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6112 arch/x86/kvm/mmu.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              6116 arch/x86/kvm/mmu.c 		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
kvm              6121 arch/x86/kvm/mmu.c void kvm_mmu_zap_all(struct kvm *kvm)
kvm              6127 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6129 arch/x86/kvm/mmu.c 	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
kvm              6132 arch/x86/kvm/mmu.c 		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
kvm              6134 arch/x86/kvm/mmu.c 		if (cond_resched_lock(&kvm->mmu_lock))
kvm              6138 arch/x86/kvm/mmu.c 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
kvm              6139 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6142 arch/x86/kvm/mmu.c void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
kvm              6163 arch/x86/kvm/mmu.c 		kvm_mmu_zap_all_fast(kvm);
kvm              6170 arch/x86/kvm/mmu.c 	struct kvm *kvm;
kvm              6176 arch/x86/kvm/mmu.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              6194 arch/x86/kvm/mmu.c 		if (!kvm->arch.n_used_mmu_pages &&
kvm              6195 arch/x86/kvm/mmu.c 		    !kvm_has_zapped_obsolete_pages(kvm))
kvm              6198 arch/x86/kvm/mmu.c 		idx = srcu_read_lock(&kvm->srcu);
kvm              6199 arch/x86/kvm/mmu.c 		spin_lock(&kvm->mmu_lock);
kvm              6201 arch/x86/kvm/mmu.c 		if (kvm_has_zapped_obsolete_pages(kvm)) {
kvm              6202 arch/x86/kvm/mmu.c 			kvm_mmu_commit_zap_page(kvm,
kvm              6203 arch/x86/kvm/mmu.c 			      &kvm->arch.zapped_obsolete_pages);
kvm              6207 arch/x86/kvm/mmu.c 		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
kvm              6209 arch/x86/kvm/mmu.c 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
kvm              6212 arch/x86/kvm/mmu.c 		spin_unlock(&kvm->mmu_lock);
kvm              6213 arch/x86/kvm/mmu.c 		srcu_read_unlock(&kvm->srcu, idx);
kvm              6220 arch/x86/kvm/mmu.c 		list_move_tail(&kvm->vm_list, &vm_list);
kvm              6294 arch/x86/kvm/mmu.c 		struct kvm *kvm;
kvm              6298 arch/x86/kvm/mmu.c 		list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              6299 arch/x86/kvm/mmu.c 			mutex_lock(&kvm->slots_lock);
kvm              6300 arch/x86/kvm/mmu.c 			kvm_mmu_zap_all_fast(kvm);
kvm              6301 arch/x86/kvm/mmu.c 			mutex_unlock(&kvm->slots_lock);
kvm              6303 arch/x86/kvm/mmu.c 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
kvm              6361 arch/x86/kvm/mmu.c unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
kvm              6370 arch/x86/kvm/mmu.c 		slots = __kvm_memslots(kvm, i);
kvm              6410 arch/x86/kvm/mmu.c 		struct kvm *kvm;
kvm              6414 arch/x86/kvm/mmu.c 		list_for_each_entry(kvm, &vm_list, vm_list)
kvm              6415 arch/x86/kvm/mmu.c 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
kvm              6423 arch/x86/kvm/mmu.c static void kvm_recover_nx_lpages(struct kvm *kvm)
kvm              6431 arch/x86/kvm/mmu.c 	rcu_idx = srcu_read_lock(&kvm->srcu);
kvm              6432 arch/x86/kvm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              6435 arch/x86/kvm/mmu.c 	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
kvm              6436 arch/x86/kvm/mmu.c 	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
kvm              6442 arch/x86/kvm/mmu.c 		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
kvm              6446 arch/x86/kvm/mmu.c 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
kvm              6449 arch/x86/kvm/mmu.c 		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm              6450 arch/x86/kvm/mmu.c 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
kvm              6452 arch/x86/kvm/mmu.c 				cond_resched_lock(&kvm->mmu_lock);
kvm              6456 arch/x86/kvm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              6457 arch/x86/kvm/mmu.c 	srcu_read_unlock(&kvm->srcu, rcu_idx);
kvm              6467 arch/x86/kvm/mmu.c static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
kvm              6488 arch/x86/kvm/mmu.c 		kvm_recover_nx_lpages(kvm);
kvm              6492 arch/x86/kvm/mmu.c int kvm_mmu_post_init_vm(struct kvm *kvm)
kvm              6496 arch/x86/kvm/mmu.c 	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
kvm              6498 arch/x86/kvm/mmu.c 					  &kvm->arch.nx_lpage_recovery_thread);
kvm              6500 arch/x86/kvm/mmu.c 		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
kvm              6505 arch/x86/kvm/mmu.c void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
kvm              6507 arch/x86/kvm/mmu.c 	if (kvm->arch.nx_lpage_recovery_thread)
kvm              6508 arch/x86/kvm/mmu.c 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
kvm                67 arch/x86/kvm/mmu.h static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
kvm                69 arch/x86/kvm/mmu.h 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
kvm                70 arch/x86/kvm/mmu.h 		return kvm->arch.n_max_mmu_pages -
kvm                71 arch/x86/kvm/mmu.h 			kvm->arch.n_used_mmu_pages;
kvm               206 arch/x86/kvm/mmu.h void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
kvm               210 arch/x86/kvm/mmu.h bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
kvm               214 arch/x86/kvm/mmu.h int kvm_mmu_post_init_vm(struct kvm *kvm);
kvm               215 arch/x86/kvm/mmu.h void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
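A large share of the mmu.c entries above follow one locking discipline: under spin_lock(&kvm->mmu_lock), shadow pages are queued onto an invalid_list by kvm_mmu_prepare_zap_page(), and kvm_mmu_commit_zap_page() later flushes remote TLBs once and frees the whole batch. A minimal sketch of that prepare/commit pairing, modeled on the kvm_mmu_unprotect_page() entries above (zap_gfn_example is a hypothetical wrapper; the other names are the ones listed):

static void zap_gfn_example(struct kvm *kvm, gfn_t gfn)
{
	LIST_HEAD(invalid_list);
	struct kvm_mmu_page *sp;

	spin_lock(&kvm->mmu_lock);
	/* Queue every indirect shadow page mapping this gfn; nothing
	 * is freed yet, so concurrent rmap walkers under mmu_lock
	 * never see a half-torn-down page. */
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	/* One remote TLB flush, then the batch is freed. */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}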
kvm                28 arch/x86/kvm/mmu_audit.c #define audit_printk(kvm, fmt, args...)		\
kvm                30 arch/x86/kvm/mmu_audit.c 		fmt, audit_point_name[kvm->arch.audit_point], ##args)
kvm                83 arch/x86/kvm/mmu_audit.c typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);
kvm                85 arch/x86/kvm/mmu_audit.c static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
kvm                89 arch/x86/kvm/mmu_audit.c 	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
kvm                90 arch/x86/kvm/mmu_audit.c 		fn(kvm, sp);
kvm               104 arch/x86/kvm/mmu_audit.c 			audit_printk(vcpu->kvm, "unsync sp: %p "
kvm               121 arch/x86/kvm/mmu_audit.c 		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
kvm               126 arch/x86/kvm/mmu_audit.c static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
kvm               138 arch/x86/kvm/mmu_audit.c 	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
kvm               143 arch/x86/kvm/mmu_audit.c 		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
kvm               144 arch/x86/kvm/mmu_audit.c 		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
kvm               154 arch/x86/kvm/mmu_audit.c 		audit_printk(kvm, "no rmap for writable spte %llx\n",
kvm               163 arch/x86/kvm/mmu_audit.c 		inspect_spte_has_rmap(vcpu->kvm, sptep);
kvm               170 arch/x86/kvm/mmu_audit.c 	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
kvm               171 arch/x86/kvm/mmu_audit.c 		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
kvm               175 arch/x86/kvm/mmu_audit.c static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm               186 arch/x86/kvm/mmu_audit.c 		inspect_spte_has_rmap(kvm, sp->spt + i);
kvm               190 arch/x86/kvm/mmu_audit.c static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm               201 arch/x86/kvm/mmu_audit.c 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
kvm               207 arch/x86/kvm/mmu_audit.c 			audit_printk(kvm, "shadow page has writable "
kvm               213 arch/x86/kvm/mmu_audit.c static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm               215 arch/x86/kvm/mmu_audit.c 	check_mappings_rmap(kvm, sp);
kvm               216 arch/x86/kvm/mmu_audit.c 	audit_write_protection(kvm, sp);
kvm               219 arch/x86/kvm/mmu_audit.c static void audit_all_active_sps(struct kvm *kvm)
kvm               221 arch/x86/kvm/mmu_audit.c 	walk_all_active_sps(kvm, audit_sp);
kvm               246 arch/x86/kvm/mmu_audit.c 	vcpu->kvm->arch.audit_point = point;
kvm               247 arch/x86/kvm/mmu_audit.c 	audit_all_active_sps(vcpu->kvm);
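The mmu_audit.c entries are organized around a single callback shape: an sp_handler applied to every page on kvm->arch.active_mmu_pages by walk_all_active_sps(), exactly as audit_all_active_sps() does with audit_sp(). A sketch of a trivial handler under that typedef (report_unsync_example is hypothetical):

static void report_unsync_example(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	/* An sp_handler is invoked once per active shadow page. */
	if (sp->unsync)
		audit_printk(kvm, "unsync sp %p gfn %llx\n", sp, sp->gfn);
}

	/* wired up as: walk_all_active_sps(kvm, report_unsync_example); */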
kvm               287 arch/x86/kvm/mmutrace.h 	TP_PROTO(struct kvm *kvm),
kvm               288 arch/x86/kvm/mmutrace.h 	TP_ARGS(kvm),
kvm               296 arch/x86/kvm/mmutrace.h 		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
kvm               297 arch/x86/kvm/mmutrace.h 		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
kvm               317 arch/x86/kvm/mtrr.c 	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
kvm               336 arch/x86/kvm/mtrr.c 	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
kvm                90 arch/x86/kvm/page_track.c void kvm_slot_page_track_add_page(struct kvm *kvm,
kvm               107 arch/x86/kvm/page_track.c 		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
kvm               108 arch/x86/kvm/page_track.c 			kvm_flush_remote_tlbs(kvm);
kvm               125 arch/x86/kvm/page_track.c void kvm_slot_page_track_remove_page(struct kvm *kvm,
kvm               162 arch/x86/kvm/page_track.c void kvm_page_track_cleanup(struct kvm *kvm)
kvm               166 arch/x86/kvm/page_track.c 	head = &kvm->arch.track_notifier_head;
kvm               170 arch/x86/kvm/page_track.c void kvm_page_track_init(struct kvm *kvm)
kvm               174 arch/x86/kvm/page_track.c 	head = &kvm->arch.track_notifier_head;
kvm               184 arch/x86/kvm/page_track.c kvm_page_track_register_notifier(struct kvm *kvm,
kvm               189 arch/x86/kvm/page_track.c 	head = &kvm->arch.track_notifier_head;
kvm               191 arch/x86/kvm/page_track.c 	spin_lock(&kvm->mmu_lock);
kvm               193 arch/x86/kvm/page_track.c 	spin_unlock(&kvm->mmu_lock);
kvm               202 arch/x86/kvm/page_track.c kvm_page_track_unregister_notifier(struct kvm *kvm,
kvm               207 arch/x86/kvm/page_track.c 	head = &kvm->arch.track_notifier_head;
kvm               209 arch/x86/kvm/page_track.c 	spin_lock(&kvm->mmu_lock);
kvm               211 arch/x86/kvm/page_track.c 	spin_unlock(&kvm->mmu_lock);
kvm               230 arch/x86/kvm/page_track.c 	head = &vcpu->kvm->arch.track_notifier_head;
kvm               249 arch/x86/kvm/page_track.c void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
kvm               255 arch/x86/kvm/page_track.c 	head = &kvm->arch.track_notifier_head;
kvm               263 arch/x86/kvm/page_track.c 			n->track_flush_slot(kvm, slot, n);
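Taken together, the page_track.c entries describe a small notifier API: the user embeds a kvm_page_track_notifier_node in its own state, registers it under mmu_lock, and is called back on tracked writes and on slot flush. A registration sketch using only the helpers listed above (the tracker_example names are hypothetical; mmu.c's mmu_sp_tracker node, registered by kvm_mmu_init_vm() above, is the in-tree user):

static struct kvm_page_track_notifier_node tracker_example_node;

static void tracker_example_init(struct kvm *kvm)
{
	/* .track_write and .track_flush_slot would be filled in
	 * before registration. */
	kvm_page_track_register_notifier(kvm, &tracker_example_node);
}

static void tracker_example_exit(struct kvm *kvm)
{
	kvm_page_track_unregister_notifier(kvm, &tracker_example_node);
}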
kvm               192 arch/x86/kvm/paging_tmpl.h 	drop_spte(vcpu->kvm, spte);
kvm               700 arch/x86/kvm/paging_tmpl.h 				account_huge_nx_page(vcpu->kvm, sp);
kvm               830 arch/x86/kvm/paging_tmpl.h 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm               861 arch/x86/kvm/paging_tmpl.h 	spin_lock(&vcpu->kvm->mmu_lock);
kvm               862 arch/x86/kvm/paging_tmpl.h 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
kvm               875 arch/x86/kvm/paging_tmpl.h 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm               912 arch/x86/kvm/paging_tmpl.h 	spin_lock(&vcpu->kvm->mmu_lock);
kvm               928 arch/x86/kvm/paging_tmpl.h 			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
kvm               929 arch/x86/kvm/paging_tmpl.h 				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
kvm               945 arch/x86/kvm/paging_tmpl.h 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              1041 arch/x86/kvm/paging_tmpl.h 			vcpu->kvm->tlbs_dirty++;
kvm              1055 arch/x86/kvm/paging_tmpl.h 			drop_spte(vcpu->kvm, &sp->spt[i]);
kvm              1061 arch/x86/kvm/paging_tmpl.h 			vcpu->kvm->tlbs_dirty++;
kvm              1076 arch/x86/kvm/paging_tmpl.h 		kvm_flush_remote_tlbs(vcpu->kvm);
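The mmu_notifier_seq samples in the mmu.c and paging_tmpl.h entries above all guard the same race: the host page is resolved outside mmu_lock, so the fault path must detect an invalidation that ran in between and retry. A compressed sketch of that skeleton, assuming the generic gfn_to_pfn() helper (fault_example and its bool result are hypothetical; the real paths return RET_PF_* codes):

static bool fault_example(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	bool installed = false;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();	/* order the sample against the pfn lookup */

	pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* may sleep */

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		/* Invalidated while we slept: caller re-faults. */
		goto out_unlock;
	/* ...install the spte here... */
	installed = true;
out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return installed;
}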
kvm               147 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
kvm               162 arch/x86/kvm/pmu.c 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
kvm               210 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
kvm               217 arch/x86/kvm/pmu.c 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
kvm               303 arch/x86/kvm/pmu.c 			vcpu->kvm->arch.kvmclock_offset;
kvm               387 arch/x86/kvm/pmu.c int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
kvm               418 arch/x86/kvm/pmu.c 	mutex_lock(&kvm->lock);
kvm               419 arch/x86/kvm/pmu.c 	rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
kvm               420 arch/x86/kvm/pmu.c 			   mutex_is_locked(&kvm->lock));
kvm               421 arch/x86/kvm/pmu.c 	mutex_unlock(&kvm->lock);
kvm               423 arch/x86/kvm/pmu.c 	synchronize_srcu_expedited(&kvm->srcu);
kvm               131 arch/x86/kvm/pmu.h int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
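The pmu.c entries pair an SRCU read side with a publisher: event filtering dereferences kvm->arch.pmu_event_filter under kvm->srcu, while kvm_vm_ioctl_set_pmu_event_filter() swaps the pointer under kvm->lock and then runs synchronize_srcu_expedited() so old readers drain before the old filter is freed. A sketch of the read side only, assuming that field (filter_present_example is hypothetical; vCPU contexts usually hold kvm->srcu already and would skip the explicit lock):

static bool filter_present_example(struct kvm *kvm)
{
	struct kvm_pmu_event_filter *filter;
	bool present;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	present = filter != NULL;
	srcu_read_unlock(&kvm->srcu, idx);

	return present;
}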
kvm               139 arch/x86/kvm/svm.c 	struct kvm kvm;
kvm               435 arch/x86/kvm/svm.c static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
kvm               437 arch/x86/kvm/svm.c 	return container_of(kvm, struct kvm_svm, kvm);
kvm               445 arch/x86/kvm/svm.c static inline bool sev_guest(struct kvm *kvm)
kvm               448 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm               456 arch/x86/kvm/svm.c static inline int sev_get_asid(struct kvm *kvm)
kvm               458 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              1207 arch/x86/kvm/svm.c 		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
kvm              1525 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
kvm              1594 arch/x86/kvm/svm.c 	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
kvm              1599 arch/x86/kvm/svm.c 	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
kvm              1685 arch/x86/kvm/svm.c 	if (sev_guest(svm->vcpu.kvm)) {
kvm              1700 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
kvm              1720 arch/x86/kvm/svm.c 	struct kvm *kvm = vcpu->kvm;
kvm              1723 arch/x86/kvm/svm.c 	mutex_lock(&kvm->slots_lock);
kvm              1724 arch/x86/kvm/svm.c 	if (kvm->arch.apic_access_page_done)
kvm              1727 arch/x86/kvm/svm.c 	ret = __x86_set_memory_region(kvm,
kvm              1734 arch/x86/kvm/svm.c 	kvm->arch.apic_access_page_done = true;
kvm              1736 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->slots_lock);
kvm              1788 arch/x86/kvm/svm.c static void sev_asid_free(struct kvm *kvm)
kvm              1790 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              1795 arch/x86/kvm/svm.c static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
kvm              1826 arch/x86/kvm/svm.c static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
kvm              1830 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              1882 arch/x86/kvm/svm.c static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
kvm              1885 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              1907 arch/x86/kvm/svm.c static void __unregister_enc_region_locked(struct kvm *kvm,
kvm              1918 arch/x86/kvm/svm.c 	sev_unpin_memory(kvm, region->pages, region->npages);
kvm              1923 arch/x86/kvm/svm.c static struct kvm *svm_vm_alloc(void)
kvm              1932 arch/x86/kvm/svm.c 	return &kvm_svm->kvm;
kvm              1935 arch/x86/kvm/svm.c static void svm_vm_free(struct kvm *kvm)
kvm              1937 arch/x86/kvm/svm.c 	vfree(to_kvm_svm(kvm));
kvm              1940 arch/x86/kvm/svm.c static void sev_vm_destroy(struct kvm *kvm)
kvm              1942 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              1946 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              1949 arch/x86/kvm/svm.c 	mutex_lock(&kvm->lock);
kvm              1957 arch/x86/kvm/svm.c 			__unregister_enc_region_locked(kvm,
kvm              1962 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->lock);
kvm              1964 arch/x86/kvm/svm.c 	sev_unbind_asid(kvm, sev->handle);
kvm              1965 arch/x86/kvm/svm.c 	sev_asid_free(kvm);
kvm              1968 arch/x86/kvm/svm.c static void avic_vm_destroy(struct kvm *kvm)
kvm              1971 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
kvm              1986 arch/x86/kvm/svm.c static void svm_vm_destroy(struct kvm *kvm)
kvm              1988 arch/x86/kvm/svm.c 	avic_vm_destroy(kvm);
kvm              1989 arch/x86/kvm/svm.c 	sev_vm_destroy(kvm);
kvm              1992 arch/x86/kvm/svm.c static int avic_vm_init(struct kvm *kvm)
kvm              1996 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
kvm              2042 arch/x86/kvm/svm.c 	avic_vm_destroy(kvm);
kvm              2054 arch/x86/kvm/svm.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm))
kvm              2181 arch/x86/kvm/svm.c static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
kvm              2215 arch/x86/kvm/svm.c 	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
kvm              2626 arch/x86/kvm/svm.c 	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
kvm              4550 arch/x86/kvm/svm.c 		struct kvm *kvm = svm->vcpu.kvm;
kvm              4558 arch/x86/kvm/svm.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              4585 arch/x86/kvm/svm.c 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
kvm              5061 arch/x86/kvm/svm.c 	int asid = sev_get_asid(svm->vcpu.kvm);
kvm              5088 arch/x86/kvm/svm.c 	if (sev_guest(svm->vcpu.kvm))
kvm              5161 arch/x86/kvm/svm.c 	return avic && irqchip_split(vcpu->kvm);
kvm              5243 arch/x86/kvm/svm.c 		struct kvm *kvm = svm->vcpu.kvm;
kvm              5245 arch/x86/kvm/svm.c 		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
kvm              5287 arch/x86/kvm/svm.c get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
kvm              5293 arch/x86/kvm/svm.c 	kvm_set_msi_irq(kvm, e, &irq);
kvm              5295 arch/x86/kvm/svm.c 	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
kvm              5320 arch/x86/kvm/svm.c static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
kvm              5327 arch/x86/kvm/svm.c 	if (!kvm_arch_has_assigned_device(kvm) ||
kvm              5334 arch/x86/kvm/svm.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm              5335 arch/x86/kvm/svm.c 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
kvm              5352 arch/x86/kvm/svm.c 		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
kvm              5359 arch/x86/kvm/svm.c 			pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
kvm              5396 arch/x86/kvm/svm.c 				vcpu = kvm_get_vcpu_by_id(kvm, id);
kvm              5416 arch/x86/kvm/svm.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm              5517 arch/x86/kvm/svm.c static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
kvm              5522 arch/x86/kvm/svm.c static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
kvm              6344 arch/x86/kvm/svm.c static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6346 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6372 arch/x86/kvm/svm.c static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
kvm              6375 arch/x86/kvm/svm.c 	int asid = sev_get_asid(kvm);
kvm              6412 arch/x86/kvm/svm.c static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
kvm              6414 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6419 arch/x86/kvm/svm.c static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6421 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6428 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6471 arch/x86/kvm/svm.c 	ret = sev_bind_asid(kvm, start->handle, error);
kvm              6478 arch/x86/kvm/svm.c 		sev_unbind_asid(kvm, start->handle);
kvm              6516 arch/x86/kvm/svm.c static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6519 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6525 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6540 arch/x86/kvm/svm.c 	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
kvm              6571 arch/x86/kvm/svm.c 		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
kvm              6586 arch/x86/kvm/svm.c 	sev_unpin_memory(kvm, inpages, npages);
kvm              6592 arch/x86/kvm/svm.c static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6595 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6602 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6634 arch/x86/kvm/svm.c 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
kvm              6661 arch/x86/kvm/svm.c static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6663 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6667 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6675 arch/x86/kvm/svm.c 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
kvm              6681 arch/x86/kvm/svm.c static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6683 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6688 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6696 arch/x86/kvm/svm.c 	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
kvm              6711 arch/x86/kvm/svm.c static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
kvm              6715 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6728 arch/x86/kvm/svm.c 	ret = sev_issue_cmd(kvm,
kvm              6735 arch/x86/kvm/svm.c static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
kvm              6748 arch/x86/kvm/svm.c 	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
kvm              6751 arch/x86/kvm/svm.c static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
kvm              6770 arch/x86/kvm/svm.c 	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
kvm              6788 arch/x86/kvm/svm.c static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
kvm              6828 arch/x86/kvm/svm.c 		ret = __sev_dbg_decrypt(kvm, dst_paddr,
kvm              6855 arch/x86/kvm/svm.c 	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
kvm              6865 arch/x86/kvm/svm.c static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
kvm              6875 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6895 arch/x86/kvm/svm.c 		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
kvm              6899 arch/x86/kvm/svm.c 		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
kvm              6901 arch/x86/kvm/svm.c 			sev_unpin_memory(kvm, src_p, n);
kvm              6923 arch/x86/kvm/svm.c 			ret = __sev_dbg_decrypt_user(kvm,
kvm              6929 arch/x86/kvm/svm.c 			ret = __sev_dbg_encrypt_user(kvm,
kvm              6936 arch/x86/kvm/svm.c 		sev_unpin_memory(kvm, src_p, n);
kvm              6937 arch/x86/kvm/svm.c 		sev_unpin_memory(kvm, dst_p, n);
kvm              6950 arch/x86/kvm/svm.c static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
kvm              6952 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              6960 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              6966 arch/x86/kvm/svm.c 	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
kvm              7006 arch/x86/kvm/svm.c 	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
kvm              7015 arch/x86/kvm/svm.c 	sev_unpin_memory(kvm, pages, n);
kvm              7019 arch/x86/kvm/svm.c static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
kvm              7030 arch/x86/kvm/svm.c 	mutex_lock(&kvm->lock);
kvm              7034 arch/x86/kvm/svm.c 		r = sev_guest_init(kvm, &sev_cmd);
kvm              7037 arch/x86/kvm/svm.c 		r = sev_launch_start(kvm, &sev_cmd);
kvm              7040 arch/x86/kvm/svm.c 		r = sev_launch_update_data(kvm, &sev_cmd);
kvm              7043 arch/x86/kvm/svm.c 		r = sev_launch_measure(kvm, &sev_cmd);
kvm              7046 arch/x86/kvm/svm.c 		r = sev_launch_finish(kvm, &sev_cmd);
kvm              7049 arch/x86/kvm/svm.c 		r = sev_guest_status(kvm, &sev_cmd);
kvm              7052 arch/x86/kvm/svm.c 		r = sev_dbg_crypt(kvm, &sev_cmd, true);
kvm              7055 arch/x86/kvm/svm.c 		r = sev_dbg_crypt(kvm, &sev_cmd, false);
kvm              7058 arch/x86/kvm/svm.c 		r = sev_launch_secret(kvm, &sev_cmd);
kvm              7069 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->lock);
kvm              7073 arch/x86/kvm/svm.c static int svm_register_enc_region(struct kvm *kvm,
kvm              7076 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              7080 arch/x86/kvm/svm.c 	if (!sev_guest(kvm))
kvm              7090 arch/x86/kvm/svm.c 	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
kvm              7107 arch/x86/kvm/svm.c 	mutex_lock(&kvm->lock);
kvm              7109 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->lock);
kvm              7119 arch/x86/kvm/svm.c find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
kvm              7121 arch/x86/kvm/svm.c 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
kvm              7135 arch/x86/kvm/svm.c static int svm_unregister_enc_region(struct kvm *kvm,
kvm              7141 arch/x86/kvm/svm.c 	mutex_lock(&kvm->lock);
kvm              7143 arch/x86/kvm/svm.c 	if (!sev_guest(kvm)) {
kvm              7148 arch/x86/kvm/svm.c 	region = find_enc_region(kvm, range);
kvm              7154 arch/x86/kvm/svm.c 	__unregister_enc_region_locked(kvm, region);
kvm              7156 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->lock);
kvm              7160 arch/x86/kvm/svm.c 	mutex_unlock(&kvm->lock);
kvm              7212 arch/x86/kvm/svm.c 		if (!sev_guest(vcpu->kvm))
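Structurally, most of the svm.c entries hang off one idiom: struct kvm is embedded first in struct kvm_svm, svm_vm_alloc() returns the embedded pointer, and to_kvm_svm() recovers the wrapper with container_of() so helpers like sev_guest() and sev_get_asid() can reach sev_info. The same shape recurs in the vmx entries below. A miniature of the pattern with illustrative names (outer_vm_example is hypothetical, not the kernel's type):

struct outer_vm_example {
	struct kvm kvm;			/* embedded first, as in kvm_svm */
	unsigned long private_state;	/* stand-in for sev_info etc. */
};

static inline struct outer_vm_example *to_outer_example(struct kvm *kvm)
{
	/* Same recovery as to_kvm_svm()/to_kvm_vmx(). */
	return container_of(kvm, struct outer_vm_example, kvm);
}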
kvm                12 arch/x86/kvm/trace.h #define TRACE_SYSTEM kvm
kvm              1502 arch/x86/kvm/trace.h #define TRACE_INCLUDE_PATH ../../arch/x86/kvm
kvm               686 arch/x86/kvm/vmx/nested.c 	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
kvm              4485 arch/x86/kvm/vmx/nested.c 	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
kvm               460 arch/x86/kvm/vmx/vmx.c static void check_ept_pointer_match(struct kvm *kvm)
kvm               466 arch/x86/kvm/vmx/vmx.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               470 arch/x86/kvm/vmx/vmx.c 			to_kvm_vmx(kvm)->ept_pointers_match
kvm               476 arch/x86/kvm/vmx/vmx.c 	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
kvm               488 arch/x86/kvm/vmx/vmx.c static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
kvm               505 arch/x86/kvm/vmx/vmx.c static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
kvm               511 arch/x86/kvm/vmx/vmx.c 	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
kvm               513 arch/x86/kvm/vmx/vmx.c 	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
kvm               514 arch/x86/kvm/vmx/vmx.c 		check_ept_pointer_match(kvm);
kvm               516 arch/x86/kvm/vmx/vmx.c 	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
kvm               517 arch/x86/kvm/vmx/vmx.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               521 arch/x86/kvm/vmx/vmx.c 					kvm, vcpu, range);
kvm               524 arch/x86/kvm/vmx/vmx.c 		ret = __hv_remote_flush_tlb_with_range(kvm,
kvm               525 arch/x86/kvm/vmx/vmx.c 				kvm_get_vcpu(kvm, 0), range);
kvm               528 arch/x86/kvm/vmx/vmx.c 	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
kvm               531 arch/x86/kvm/vmx/vmx.c static int hv_remote_flush_tlb(struct kvm *kvm)
kvm               533 arch/x86/kvm/vmx/vmx.c 	return hv_remote_flush_tlb_with_range(kvm, NULL);
kvm               540 arch/x86/kvm/vmx/vmx.c 			&vcpu->kvm->arch.hyperv.hv_pa_pg;
kvm               555 arch/x86/kvm/vmx/vmx.c 	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
kvm              1380 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
kvm              1574 arch/x86/kvm/vmx/vmx.c 	if (kvm_hlt_in_guest(vcpu->kvm) &&
kvm              2720 arch/x86/kvm/vmx/vmx.c 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
kvm              2972 arch/x86/kvm/vmx/vmx.c 	struct kvm *kvm = vcpu->kvm;
kvm              2983 arch/x86/kvm/vmx/vmx.c 			spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
kvm              2985 arch/x86/kvm/vmx/vmx.c 			to_kvm_vmx(kvm)->ept_pointers_match
kvm              2987 arch/x86/kvm/vmx/vmx.c 			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
kvm              2996 arch/x86/kvm/vmx/vmx.c 			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
kvm              3426 arch/x86/kvm/vmx/vmx.c static int init_rmode_tss(struct kvm *kvm)
kvm              3432 arch/x86/kvm/vmx/vmx.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              3433 arch/x86/kvm/vmx/vmx.c 	fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
kvm              3434 arch/x86/kvm/vmx/vmx.c 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
kvm              3438 arch/x86/kvm/vmx/vmx.c 	r = kvm_write_guest_page(kvm, fn++, &data,
kvm              3442 arch/x86/kvm/vmx/vmx.c 	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
kvm              3445 arch/x86/kvm/vmx/vmx.c 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
kvm              3449 arch/x86/kvm/vmx/vmx.c 	r = kvm_write_guest_page(kvm, fn, &data,
kvm              3453 arch/x86/kvm/vmx/vmx.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              3457 arch/x86/kvm/vmx/vmx.c static int init_rmode_identity_map(struct kvm *kvm)
kvm              3459 arch/x86/kvm/vmx/vmx.c 	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
kvm              3465 arch/x86/kvm/vmx/vmx.c 	mutex_lock(&kvm->slots_lock);
kvm              3474 arch/x86/kvm/vmx/vmx.c 	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
kvm              3479 arch/x86/kvm/vmx/vmx.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              3480 arch/x86/kvm/vmx/vmx.c 	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
kvm              3487 arch/x86/kvm/vmx/vmx.c 		r = kvm_write_guest_page(kvm, identity_map_pfn,
kvm              3495 arch/x86/kvm/vmx/vmx.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              3498 arch/x86/kvm/vmx/vmx.c 	mutex_unlock(&kvm->slots_lock);
kvm              3517 arch/x86/kvm/vmx/vmx.c static int alloc_apic_access_page(struct kvm *kvm)
kvm              3522 arch/x86/kvm/vmx/vmx.c 	mutex_lock(&kvm->slots_lock);
kvm              3523 arch/x86/kvm/vmx/vmx.c 	if (kvm->arch.apic_access_page_done)
kvm              3525 arch/x86/kvm/vmx/vmx.c 	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
kvm              3530 arch/x86/kvm/vmx/vmx.c 	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
kvm              3541 arch/x86/kvm/vmx/vmx.c 	kvm->arch.apic_access_page_done = true;
kvm              3543 arch/x86/kvm/vmx/vmx.c 	mutex_unlock(&kvm->slots_lock);
kvm              3979 arch/x86/kvm/vmx/vmx.c 	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
kvm              3982 arch/x86/kvm/vmx/vmx.c 	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
kvm              4006 arch/x86/kvm/vmx/vmx.c 	if (kvm_pause_in_guest(vmx->vcpu.kvm))
kvm              4182 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
kvm              4489 arch/x86/kvm/vmx/vmx.c static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
kvm              4496 arch/x86/kvm/vmx/vmx.c 	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
kvm              4500 arch/x86/kvm/vmx/vmx.c 	to_kvm_vmx(kvm)->tss_addr = addr;
kvm              4501 arch/x86/kvm/vmx/vmx.c 	return init_rmode_tss(kvm);
kvm              4504 arch/x86/kvm/vmx/vmx.c static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
kvm              4506 arch/x86/kvm/vmx/vmx.c 	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
kvm              5343 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vcpu->kvm))
kvm              5651 arch/x86/kvm/vmx/vmx.c static void kvm_flush_pml_buffers(struct kvm *kvm)
kvm              5661 arch/x86/kvm/vmx/vmx.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              6640 arch/x86/kvm/vmx/vmx.c static struct kvm *vmx_vm_alloc(void)
kvm              6649 arch/x86/kvm/vmx/vmx.c 	return &kvm_vmx->kvm;
kvm              6652 arch/x86/kvm/vmx/vmx.c static void vmx_vm_free(struct kvm *kvm)
kvm              6654 arch/x86/kvm/vmx/vmx.c 	kfree(kvm->arch.hyperv.hv_pa_pg);
kvm              6655 arch/x86/kvm/vmx/vmx.c 	vfree(to_kvm_vmx(kvm));
kvm              6674 arch/x86/kvm/vmx/vmx.c static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
kvm              6706 arch/x86/kvm/vmx/vmx.c 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
kvm              6743 arch/x86/kvm/vmx/vmx.c 	if (kvm_cstate_in_guest(kvm)) {
kvm              6759 arch/x86/kvm/vmx/vmx.c 		err = alloc_apic_access_page(kvm);
kvm              6765 arch/x86/kvm/vmx/vmx.c 		err = init_rmode_identity_map(kvm);
kvm              6813 arch/x86/kvm/vmx/vmx.c static int vmx_vm_init(struct kvm *kvm)
kvm              6815 arch/x86/kvm/vmx/vmx.c 	spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
kvm              6818 arch/x86/kvm/vmx/vmx.c 		kvm->arch.pause_in_guest = true;
kvm              6884 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
kvm              6892 arch/x86/kvm/vmx/vmx.c 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
kvm              7208 arch/x86/kvm/vmx/vmx.c 	if (kvm_mwait_in_guest(vcpu->kvm) ||
kvm              7253 arch/x86/kvm/vmx/vmx.c 	if (!kvm_pause_in_guest(vcpu->kvm))
kvm              7257 arch/x86/kvm/vmx/vmx.c static void vmx_slot_enable_log_dirty(struct kvm *kvm,
kvm              7260 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
kvm              7261 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
kvm              7264 arch/x86/kvm/vmx/vmx.c static void vmx_slot_disable_log_dirty(struct kvm *kvm,
kvm              7267 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_slot_set_dirty(kvm, slot);
kvm              7270 arch/x86/kvm/vmx/vmx.c static void vmx_flush_log_dirty(struct kvm *kvm)
kvm              7272 arch/x86/kvm/vmx/vmx.c 	kvm_flush_pml_buffers(kvm);
kvm              7301 arch/x86/kvm/vmx/vmx.c 		if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
kvm              7311 arch/x86/kvm/vmx/vmx.c static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm              7315 arch/x86/kvm/vmx/vmx.c 	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
kvm              7368 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
kvm              7458 arch/x86/kvm/vmx/vmx.c static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
kvm              7468 arch/x86/kvm/vmx/vmx.c 	if (!kvm_arch_has_assigned_device(kvm) ||
kvm              7470 arch/x86/kvm/vmx/vmx.c 		!kvm_vcpu_apicv_active(kvm->vcpus[0]))
kvm              7473 arch/x86/kvm/vmx/vmx.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm              7474 arch/x86/kvm/vmx/vmx.c 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
kvm              7501 arch/x86/kvm/vmx/vmx.c 		kvm_set_msi_irq(kvm, e, &irq);
kvm              7502 arch/x86/kvm/vmx/vmx.c 		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
kvm              7539 arch/x86/kvm/vmx/vmx.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               296 arch/x86/kvm/vmx/vmx.h 	struct kvm kvm;
kvm               461 arch/x86/kvm/vmx/vmx.h static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
kvm               463 arch/x86/kvm/vmx/vmx.h 	return container_of(kvm, struct kvm_vmx, kvm);
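
to_kvm_vmx() recovers the VMX wrapper from an embedded struct kvm with container_of(); vmx_vm_alloc()/vmx_vm_free() earlier in the index allocate and free through the same embedding. A standalone, runnable userspace rendition of the idiom (struct vm and struct vmx here are stand-ins, not the kernel's types):

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace rendition of the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct vm  { int id; };				/* stand-in for struct kvm     */
	struct vmx { long ept_ptr; struct vm vm; };	/* stand-in for struct kvm_vmx */

	static struct vmx *to_vmx(struct vm *vm)
	{
		return container_of(vm, struct vmx, vm);
	}

	int main(void)
	{
		struct vmx v = { .ept_ptr = 0xdead, .vm = { .id = 7 } };
		struct vm *inner = &v.vm;	/* what generic code passes around */

		/* Recover the wrapper from the embedded member. */
		printf("ept_ptr = %#lx\n", to_vmx(inner)->ept_ptr);
		return 0;
	}
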
kvm                97 arch/x86/kvm/x86.c #define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
kvm               519 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled ||
kvm               812 arch/x86/kvm/x86.c 	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
kvm               813 arch/x86/kvm/x86.c 	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
kvm               814 arch/x86/kvm/x86.c 		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
kvm              1668 arch/x86/kvm/x86.c static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
kvm              1678 arch/x86/kvm/x86.c 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
kvm              1687 arch/x86/kvm/x86.c 	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
kvm              1698 arch/x86/kvm/x86.c 	if (kvm->arch.kvmclock_offset) {
kvm              1699 arch/x86/kvm/x86.c 		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
kvm              1706 arch/x86/kvm/x86.c 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
kvm              1709 arch/x86/kvm/x86.c 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
kvm              1847 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &vcpu->kvm->arch;
kvm              1851 arch/x86/kvm/x86.c 			 atomic_read(&vcpu->kvm->online_vcpus));
kvm              1866 arch/x86/kvm/x86.c 			    atomic_read(&vcpu->kvm->online_vcpus),
kvm              1941 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
kvm              1949 arch/x86/kvm/x86.c 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
kvm              1952 arch/x86/kvm/x86.c 	elapsed = ns - kvm->arch.last_tsc_nsec;
kvm              1963 arch/x86/kvm/x86.c 			u64 tsc_exp = kvm->arch.last_tsc_write +
kvm              1983 arch/x86/kvm/x86.c 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
kvm              1985 arch/x86/kvm/x86.c 			offset = kvm->arch.cur_tsc_offset;
kvm              1992 arch/x86/kvm/x86.c 		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
kvm              2003 arch/x86/kvm/x86.c 		kvm->arch.cur_tsc_generation++;
kvm              2004 arch/x86/kvm/x86.c 		kvm->arch.cur_tsc_nsec = ns;
kvm              2005 arch/x86/kvm/x86.c 		kvm->arch.cur_tsc_write = data;
kvm              2006 arch/x86/kvm/x86.c 		kvm->arch.cur_tsc_offset = offset;
kvm              2014 arch/x86/kvm/x86.c 	kvm->arch.last_tsc_nsec = ns;
kvm              2015 arch/x86/kvm/x86.c 	kvm->arch.last_tsc_write = data;
kvm              2016 arch/x86/kvm/x86.c 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
kvm              2021 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
kvm              2022 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
kvm              2023 arch/x86/kvm/x86.c 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
kvm              2029 arch/x86/kvm/x86.c 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
kvm              2031 arch/x86/kvm/x86.c 	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
kvm              2033 arch/x86/kvm/x86.c 		kvm->arch.nr_vcpus_matched_tsc = 0;
kvm              2035 arch/x86/kvm/x86.c 		kvm->arch.nr_vcpus_matched_tsc++;
kvm              2039 arch/x86/kvm/x86.c 	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
kvm              2221 arch/x86/kvm/x86.c static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
kvm              2224 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &kvm->arch;
kvm              2229 arch/x86/kvm/x86.c 			atomic_read(&kvm->online_vcpus));
kvm              2252 arch/x86/kvm/x86.c void kvm_make_mclock_inprogress_request(struct kvm *kvm)
kvm              2254 arch/x86/kvm/x86.c 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
kvm              2257 arch/x86/kvm/x86.c static void kvm_gen_update_masterclock(struct kvm *kvm)
kvm              2262 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &kvm->arch;
kvm              2265 arch/x86/kvm/x86.c 	kvm_make_mclock_inprogress_request(kvm);
kvm              2267 arch/x86/kvm/x86.c 	pvclock_update_vm_gtod_copy(kvm);
kvm              2269 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              2273 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              2280 arch/x86/kvm/x86.c u64 get_kvmclock_ns(struct kvm *kvm)
kvm              2282 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &kvm->arch;
kvm              2317 arch/x86/kvm/x86.c 	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
kvm              2341 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
kvm              2357 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
kvm              2364 arch/x86/kvm/x86.c 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
kvm              2373 arch/x86/kvm/x86.c 	struct kvm_arch *ka = &v->kvm->arch;
kvm              2442 arch/x86/kvm/x86.c 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
kvm              2454 arch/x86/kvm/x86.c 	if (v == kvm_get_vcpu(v->kvm, 0))
kvm              2455 arch/x86/kvm/x86.c 		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
kvm              2481 arch/x86/kvm/x86.c 	struct kvm *kvm = container_of(ka, struct kvm, arch);
kvm              2484 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              2492 arch/x86/kvm/x86.c 	struct kvm *kvm = v->kvm;
kvm              2495 arch/x86/kvm/x86.c 	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
kvm              2506 arch/x86/kvm/x86.c 	struct kvm *kvm = container_of(ka, struct kvm, arch);
kvm              2511 arch/x86/kvm/x86.c 	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
kvm              2512 arch/x86/kvm/x86.c 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
kvm              2580 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
kvm              2582 arch/x86/kvm/x86.c 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
kvm              2583 arch/x86/kvm/x86.c 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
kvm              2584 arch/x86/kvm/x86.c 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
kvm              2585 arch/x86/kvm/x86.c 		: kvm->arch.xen_hvm_config.blob_size_32;
kvm              2625 arch/x86/kvm/x86.c 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
kvm              2771 arch/x86/kvm/x86.c 		if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
kvm              2799 arch/x86/kvm/x86.c 		vcpu->kvm->arch.wall_clock = data;
kvm              2800 arch/x86/kvm/x86.c 		kvm_write_wall_clock(vcpu->kvm, data);
kvm              2804 arch/x86/kvm/x86.c 		struct kvm_arch *ka = &vcpu->kvm->arch;
kvm              2823 arch/x86/kvm/x86.c 		if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
kvm              2932 arch/x86/kvm/x86.c 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
kvm              3091 arch/x86/kvm/x86.c 		msr_info->data = vcpu->kvm->arch.wall_clock;
kvm              3165 arch/x86/kvm/x86.c 		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
kvm              3266 arch/x86/kvm/x86.c int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm              3490 arch/x86/kvm/x86.c 	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
kvm              3533 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm              3586 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              3588 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              3648 arch/x86/kvm/x86.c 	if (!irqchip_in_kernel(vcpu->kvm)) {
kvm              3658 arch/x86/kvm/x86.c 	if (pic_in_kernel(vcpu->kvm))
kvm              3787 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled)
kvm              3819 arch/x86/kvm/x86.c 	if (vcpu->kvm->arch.exception_payload_enabled)
kvm              3838 arch/x86/kvm/x86.c 		if (!vcpu->kvm->arch.exception_payload_enabled)
kvm              4147 arch/x86/kvm/x86.c 		if (!irqchip_in_kernel(vcpu->kvm))
kvm              4278 arch/x86/kvm/x86.c 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4280 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              4284 arch/x86/kvm/x86.c 		int idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4286 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              4314 arch/x86/kvm/x86.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4316 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              4520 arch/x86/kvm/x86.c 		idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              4522 arch/x86/kvm/x86.c 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              4559 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
kvm              4565 arch/x86/kvm/x86.c 	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
kvm              4569 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
kvm              4572 arch/x86/kvm/x86.c 	return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
kvm              4575 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
kvm              4581 arch/x86/kvm/x86.c 	mutex_lock(&kvm->slots_lock);
kvm              4583 arch/x86/kvm/x86.c 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm              4584 arch/x86/kvm/x86.c 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
kvm              4586 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->slots_lock);
kvm              4590 arch/x86/kvm/x86.c static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
kvm              4592 arch/x86/kvm/x86.c 	return kvm->arch.n_max_mmu_pages;
kvm              4595 arch/x86/kvm/x86.c static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
kvm              4597 arch/x86/kvm/x86.c 	struct kvm_pic *pic = kvm->arch.vpic;
kvm              4611 arch/x86/kvm/x86.c 		kvm_get_ioapic(kvm, &chip->chip.ioapic);
kvm              4620 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
kvm              4622 arch/x86/kvm/x86.c 	struct kvm_pic *pic = kvm->arch.vpic;
kvm              4640 arch/x86/kvm/x86.c 		kvm_set_ioapic(kvm, &chip->chip.ioapic);
kvm              4650 arch/x86/kvm/x86.c static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
kvm              4652 arch/x86/kvm/x86.c 	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
kvm              4662 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
kvm              4665 arch/x86/kvm/x86.c 	struct kvm_pit *pit = kvm->arch.vpit;
kvm              4675 arch/x86/kvm/x86.c static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
kvm              4677 arch/x86/kvm/x86.c 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm              4678 arch/x86/kvm/x86.c 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
kvm              4680 arch/x86/kvm/x86.c 	ps->flags = kvm->arch.vpit->pit_state.flags;
kvm              4681 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
kvm              4686 arch/x86/kvm/x86.c static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
kvm              4691 arch/x86/kvm/x86.c 	struct kvm_pit *pit = kvm->arch.vpit;
kvm              4708 arch/x86/kvm/x86.c static int kvm_vm_ioctl_reinject(struct kvm *kvm,
kvm              4711 arch/x86/kvm/x86.c 	struct kvm_pit *pit = kvm->arch.vpit;
kvm              4746 arch/x86/kvm/x86.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
kvm              4751 arch/x86/kvm/x86.c 	mutex_lock(&kvm->slots_lock);
kvm              4757 arch/x86/kvm/x86.c 		kvm_x86_ops->flush_log_dirty(kvm);
kvm              4759 arch/x86/kvm/x86.c 	r = kvm_get_dirty_log_protect(kvm, log, &flush);
kvm              4765 arch/x86/kvm/x86.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              4767 arch/x86/kvm/x86.c 		kvm_flush_remote_tlbs(kvm);
kvm              4769 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->slots_lock);
kvm              4773 arch/x86/kvm/x86.c int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
kvm              4778 arch/x86/kvm/x86.c 	mutex_lock(&kvm->slots_lock);
kvm              4784 arch/x86/kvm/x86.c 		kvm_x86_ops->flush_log_dirty(kvm);
kvm              4786 arch/x86/kvm/x86.c 	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
kvm              4792 arch/x86/kvm/x86.c 	lockdep_assert_held(&kvm->slots_lock);
kvm              4794 arch/x86/kvm/x86.c 		kvm_flush_remote_tlbs(kvm);
kvm              4796 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->slots_lock);
kvm              4800 arch/x86/kvm/x86.c int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
kvm              4803 arch/x86/kvm/x86.c 	if (!irqchip_in_kernel(kvm))
kvm              4806 arch/x86/kvm/x86.c 	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
kvm              4812 arch/x86/kvm/x86.c int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm              4822 arch/x86/kvm/x86.c 		kvm->arch.disabled_quirks = cap->args[0];
kvm              4826 arch/x86/kvm/x86.c 		mutex_lock(&kvm->lock);
kvm              4831 arch/x86/kvm/x86.c 		if (irqchip_in_kernel(kvm))
kvm              4833 arch/x86/kvm/x86.c 		if (kvm->created_vcpus)
kvm              4835 arch/x86/kvm/x86.c 		r = kvm_setup_empty_irq_routing(kvm);
kvm              4840 arch/x86/kvm/x86.c 		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
kvm              4841 arch/x86/kvm/x86.c 		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
kvm              4844 arch/x86/kvm/x86.c 		mutex_unlock(&kvm->lock);
kvm              4853 arch/x86/kvm/x86.c 			kvm->arch.x2apic_format = true;
kvm              4855 arch/x86/kvm/x86.c 			kvm->arch.x2apic_broadcast_quirk_disabled = true;
kvm              4866 arch/x86/kvm/x86.c 			kvm->arch.mwait_in_guest = true;
kvm              4868 arch/x86/kvm/x86.c 			kvm->arch.hlt_in_guest = true;
kvm              4870 arch/x86/kvm/x86.c 			kvm->arch.pause_in_guest = true;
kvm              4872 arch/x86/kvm/x86.c 			kvm->arch.cstate_in_guest = true;
kvm              4876 arch/x86/kvm/x86.c 		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
kvm              4880 arch/x86/kvm/x86.c 		kvm->arch.exception_payload_enabled = cap->args[0];
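
The kvm_vm_ioctl_enable_cap() excerpt above is the kernel half of KVM_ENABLE_CAP. A userspace sketch of driving it for KVM_CAP_X86_DISABLE_EXITS follows; error handling and the KVM_CHECK_EXTENSION probe are elided, and userspace conventionally enables this before creating any vCPU:

	#include <fcntl.h>
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/*
	 * Sketch: how userspace reaches kvm_vm_ioctl_enable_cap() above.
	 * Error handling elided; fds intentionally left open for brevity.
	 */
	int demo_disable_exits(void)
	{
		int sys_fd = open("/dev/kvm", O_RDWR);
		int vm_fd  = ioctl(sys_fd, KVM_CREATE_VM, 0);
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_X86_DISABLE_EXITS,
			.args[0] = KVM_X86_DISABLE_EXITS_MWAIT |
				   KVM_X86_DISABLE_EXITS_HLT,
		};

		/* Lands in kvm_vm_ioctl_enable_cap(); sets kvm->arch.*_in_guest. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
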
kvm              4893 arch/x86/kvm/x86.c 	struct kvm *kvm = filp->private_data;
kvm              4909 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
kvm              4914 arch/x86/kvm/x86.c 		mutex_lock(&kvm->lock);
kvm              4916 arch/x86/kvm/x86.c 		if (kvm->created_vcpus)
kvm              4921 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
kvm              4923 arch/x86/kvm/x86.c 		mutex_unlock(&kvm->lock);
kvm              4927 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
kvm              4930 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
kvm              4933 arch/x86/kvm/x86.c 		mutex_lock(&kvm->lock);
kvm              4936 arch/x86/kvm/x86.c 		if (irqchip_in_kernel(kvm))
kvm              4940 arch/x86/kvm/x86.c 		if (kvm->created_vcpus)
kvm              4943 arch/x86/kvm/x86.c 		r = kvm_pic_init(kvm);
kvm              4947 arch/x86/kvm/x86.c 		r = kvm_ioapic_init(kvm);
kvm              4949 arch/x86/kvm/x86.c 			kvm_pic_destroy(kvm);
kvm              4953 arch/x86/kvm/x86.c 		r = kvm_setup_default_irq_routing(kvm);
kvm              4955 arch/x86/kvm/x86.c 			kvm_ioapic_destroy(kvm);
kvm              4956 arch/x86/kvm/x86.c 			kvm_pic_destroy(kvm);
kvm              4961 arch/x86/kvm/x86.c 		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
kvm              4963 arch/x86/kvm/x86.c 		mutex_unlock(&kvm->lock);
kvm              4975 arch/x86/kvm/x86.c 		mutex_lock(&kvm->lock);
kvm              4977 arch/x86/kvm/x86.c 		if (kvm->arch.vpit)
kvm              4980 arch/x86/kvm/x86.c 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
kvm              4981 arch/x86/kvm/x86.c 		if (kvm->arch.vpit)
kvm              4984 arch/x86/kvm/x86.c 		mutex_unlock(&kvm->lock);
kvm              4997 arch/x86/kvm/x86.c 		if (!irqchip_kernel(kvm))
kvm              4999 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
kvm              5021 arch/x86/kvm/x86.c 		if (!irqchip_kernel(kvm))
kvm              5023 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
kvm              5036 arch/x86/kvm/x86.c 		if (!kvm->arch.vpit)
kvm              5038 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
kvm              5052 arch/x86/kvm/x86.c 		if (!kvm->arch.vpit)
kvm              5054 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
kvm              5059 arch/x86/kvm/x86.c 		if (!kvm->arch.vpit)
kvm              5061 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
kvm              5075 arch/x86/kvm/x86.c 		if (!kvm->arch.vpit)
kvm              5077 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
kvm              5085 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_reinject(kvm, &control);
kvm              5090 arch/x86/kvm/x86.c 		mutex_lock(&kvm->lock);
kvm              5091 arch/x86/kvm/x86.c 		if (kvm->created_vcpus)
kvm              5094 arch/x86/kvm/x86.c 			kvm->arch.bsp_vcpu_id = arg;
kvm              5095 arch/x86/kvm/x86.c 		mutex_unlock(&kvm->lock);
kvm              5105 arch/x86/kvm/x86.c 		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
kvm              5127 arch/x86/kvm/x86.c 		kvm_gen_update_masterclock(kvm);
kvm              5128 arch/x86/kvm/x86.c 		now_ns = get_kvmclock_ns(kvm);
kvm              5129 arch/x86/kvm/x86.c 		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
kvm              5130 arch/x86/kvm/x86.c 		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
kvm              5137 arch/x86/kvm/x86.c 		now_ns = get_kvmclock_ns(kvm);
kvm              5139 arch/x86/kvm/x86.c 		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
kvm              5151 arch/x86/kvm/x86.c 			r = kvm_x86_ops->mem_enc_op(kvm, argp);
kvm              5163 arch/x86/kvm/x86.c 			r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
kvm              5175 arch/x86/kvm/x86.c 			r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
kvm              5184 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
kvm              5188 arch/x86/kvm/x86.c 		r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
kvm              6468 arch/x86/kvm/x86.c 	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
kvm              6483 arch/x86/kvm/x86.c 		spin_lock(&vcpu->kvm->mmu_lock);
kvm              6484 arch/x86/kvm/x86.c 		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
kvm              6485 arch/x86/kvm/x86.c 		spin_unlock(&vcpu->kvm->mmu_lock);
kvm              6488 arch/x86/kvm/x86.c 			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
kvm              6498 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
kvm              6550 arch/x86/kvm/x86.c 	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
kvm              6898 arch/x86/kvm/x86.c 	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
kvm              6992 arch/x86/kvm/x86.c 	struct kvm *kvm;
kvm              6997 arch/x86/kvm/x86.c 	list_for_each_entry(kvm, &vm_list, vm_list)
kvm              6998 arch/x86/kvm/x86.c 		kvm_make_mclock_inprogress_request(kvm);
kvm              7007 arch/x86/kvm/x86.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              7008 arch/x86/kvm/x86.c 		struct kvm_arch *ka = &kvm->arch;
kvm              7012 arch/x86/kvm/x86.c 		pvclock_update_vm_gtod_copy(kvm);
kvm              7014 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm              7017 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm              7028 arch/x86/kvm/x86.c 	struct kvm *kvm;
kvm              7074 arch/x86/kvm/x86.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              7075 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              7200 arch/x86/kvm/x86.c 	struct kvm *kvm;
kvm              7206 arch/x86/kvm/x86.c 	list_for_each_entry(kvm, &vm_list, vm_list)
kvm              7207 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm)
kvm              7393 arch/x86/kvm/x86.c 	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
kvm              7406 arch/x86/kvm/x86.c static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
kvm              7417 arch/x86/kvm/x86.c 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
kvm              7433 arch/x86/kvm/x86.c static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
kvm              7439 arch/x86/kvm/x86.c 	map = rcu_dereference(kvm->arch.apic_map);
kvm              7455 arch/x86/kvm/x86.c 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
kvm              7485 arch/x86/kvm/x86.c 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
kvm              7486 arch/x86/kvm/x86.c 		kvm_sched_yield(vcpu->kvm, a1);
kvm              7495 arch/x86/kvm/x86.c 		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
kvm              7498 arch/x86/kvm/x86.c 		kvm_sched_yield(vcpu->kvm, a0);
kvm              7530 arch/x86/kvm/x86.c 		likely(!pic_in_kernel(vcpu->kvm));
kvm              7542 arch/x86/kvm/x86.c 		pic_in_kernel(vcpu->kvm) ||
kvm              7942 arch/x86/kvm/x86.c void kvm_make_scan_ioapic_request(struct kvm *kvm)
kvm              7944 arch/x86/kvm/x86.c 	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
kvm              7954 arch/x86/kvm/x86.c 	if (irqchip_split(vcpu->kvm))
kvm              7959 arch/x86/kvm/x86.c 		if (ioapic_in_kernel(vcpu->kvm))
kvm              7981 arch/x86/kvm/x86.c void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm              7990 arch/x86/kvm/x86.c 	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
kvm              7992 arch/x86/kvm/x86.c 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
kvm              8005 arch/x86/kvm/x86.c 	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
kvm              8050 arch/x86/kvm/x86.c 			kvm_gen_update_masterclock(vcpu->kvm);
kvm              8194 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              8223 arch/x86/kvm/x86.c 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              8312 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              8340 arch/x86/kvm/x86.c static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
kvm              8344 arch/x86/kvm/x86.c 		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm              8346 arch/x86/kvm/x86.c 		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              8386 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
kvm              8388 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              8395 arch/x86/kvm/x86.c 			r = vcpu_block(kvm, vcpu);
kvm              8422 arch/x86/kvm/x86.c 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm              8424 arch/x86/kvm/x86.c 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              8428 arch/x86/kvm/x86.c 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm              8437 arch/x86/kvm/x86.c 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              8439 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm              8909 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              8914 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              9032 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              9034 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              9152 arch/x86/kvm/x86.c struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
kvm              9157 arch/x86/kvm/x86.c 	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
kvm              9162 arch/x86/kvm/x86.c 	vcpu = kvm_x86_ops->vcpu_create(kvm, id);
kvm              9182 arch/x86/kvm/x86.c 	struct kvm *kvm = vcpu->kvm;
kvm              9203 arch/x86/kvm/x86.c 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
kvm              9298 arch/x86/kvm/x86.c 	struct kvm *kvm;
kvm              9313 arch/x86/kvm/x86.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              9314 arch/x86/kvm/x86.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              9365 arch/x86/kvm/x86.c 		list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              9366 arch/x86/kvm/x86.c 			kvm->arch.backwards_tsc_observed = true;
kvm              9367 arch/x86/kvm/x86.c 			kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              9379 arch/x86/kvm/x86.c 			kvm->arch.last_tsc_nsec = 0;
kvm              9380 arch/x86/kvm/x86.c 			kvm->arch.last_tsc_write = 0;
kvm              9433 arch/x86/kvm/x86.c 	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
kvm              9451 arch/x86/kvm/x86.c 	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
kvm              9469 arch/x86/kvm/x86.c 	if (irqchip_in_kernel(vcpu->kvm)) {
kvm              9529 arch/x86/kvm/x86.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              9531 arch/x86/kvm/x86.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              9543 arch/x86/kvm/x86.c int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm              9548 arch/x86/kvm/x86.c 	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
kvm              9549 arch/x86/kvm/x86.c 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
kvm              9550 arch/x86/kvm/x86.c 	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
kvm              9551 arch/x86/kvm/x86.c 	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
kvm              9552 arch/x86/kvm/x86.c 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
kvm              9553 arch/x86/kvm/x86.c 	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
kvm              9556 arch/x86/kvm/x86.c 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
kvm              9559 arch/x86/kvm/x86.c 		&kvm->arch.irq_sources_bitmap);
kvm              9561 arch/x86/kvm/x86.c 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
kvm              9562 arch/x86/kvm/x86.c 	mutex_init(&kvm->arch.apic_map_lock);
kvm              9563 arch/x86/kvm/x86.c 	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
kvm              9565 arch/x86/kvm/x86.c 	kvm->arch.kvmclock_offset = -ktime_get_boottime_ns();
kvm              9566 arch/x86/kvm/x86.c 	pvclock_update_vm_gtod_copy(kvm);
kvm              9568 arch/x86/kvm/x86.c 	kvm->arch.guest_can_read_msr_platform_info = true;
kvm              9570 arch/x86/kvm/x86.c 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
kvm              9571 arch/x86/kvm/x86.c 	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
kvm              9573 arch/x86/kvm/x86.c 	kvm_hv_init_vm(kvm);
kvm              9574 arch/x86/kvm/x86.c 	kvm_page_track_init(kvm);
kvm              9575 arch/x86/kvm/x86.c 	kvm_mmu_init_vm(kvm);
kvm              9577 arch/x86/kvm/x86.c 	return kvm_x86_ops->vm_init(kvm);
kvm              9580 arch/x86/kvm/x86.c int kvm_arch_post_init_vm(struct kvm *kvm)
kvm              9582 arch/x86/kvm/x86.c 	return kvm_mmu_post_init_vm(kvm);
kvm              9592 arch/x86/kvm/x86.c static void kvm_free_vcpus(struct kvm *kvm)
kvm              9600 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              9604 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              9607 arch/x86/kvm/x86.c 	mutex_lock(&kvm->lock);
kvm              9608 arch/x86/kvm/x86.c 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm              9609 arch/x86/kvm/x86.c 		kvm->vcpus[i] = NULL;
kvm              9611 arch/x86/kvm/x86.c 	atomic_set(&kvm->online_vcpus, 0);
kvm              9612 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->lock);
kvm              9615 arch/x86/kvm/x86.c void kvm_arch_sync_events(struct kvm *kvm)
kvm              9617 arch/x86/kvm/x86.c 	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
kvm              9618 arch/x86/kvm/x86.c 	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
kvm              9619 arch/x86/kvm/x86.c 	kvm_free_pit(kvm);
kvm              9622 arch/x86/kvm/x86.c int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
kvm              9626 arch/x86/kvm/x86.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              9662 arch/x86/kvm/x86.c 		r = __kvm_set_memory_region(kvm, &m);
kvm              9674 arch/x86/kvm/x86.c int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
kvm              9678 arch/x86/kvm/x86.c 	mutex_lock(&kvm->slots_lock);
kvm              9679 arch/x86/kvm/x86.c 	r = __x86_set_memory_region(kvm, id, gpa, size);
kvm              9680 arch/x86/kvm/x86.c 	mutex_unlock(&kvm->slots_lock);
kvm              9686 arch/x86/kvm/x86.c void kvm_arch_pre_destroy_vm(struct kvm *kvm)
kvm              9688 arch/x86/kvm/x86.c 	kvm_mmu_pre_destroy_vm(kvm);
kvm              9691 arch/x86/kvm/x86.c void kvm_arch_destroy_vm(struct kvm *kvm)
kvm              9693 arch/x86/kvm/x86.c 	if (current->mm == kvm->mm) {
kvm              9699 arch/x86/kvm/x86.c 		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
kvm              9700 arch/x86/kvm/x86.c 		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
kvm              9701 arch/x86/kvm/x86.c 		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
kvm              9704 arch/x86/kvm/x86.c 		kvm_x86_ops->vm_destroy(kvm);
kvm              9705 arch/x86/kvm/x86.c 	kvm_pic_destroy(kvm);
kvm              9706 arch/x86/kvm/x86.c 	kvm_ioapic_destroy(kvm);
kvm              9707 arch/x86/kvm/x86.c 	kvm_free_vcpus(kvm);
kvm              9708 arch/x86/kvm/x86.c 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kvm              9709 arch/x86/kvm/x86.c 	kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
kvm              9710 arch/x86/kvm/x86.c 	kvm_mmu_uninit_vm(kvm);
kvm              9711 arch/x86/kvm/x86.c 	kvm_page_track_cleanup(kvm);
kvm              9712 arch/x86/kvm/x86.c 	kvm_hv_destroy_vm(kvm);
kvm              9715 arch/x86/kvm/x86.c void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm              9738 arch/x86/kvm/x86.c int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm              9810 arch/x86/kvm/x86.c void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
kvm              9819 arch/x86/kvm/x86.c 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
kvm              9822 arch/x86/kvm/x86.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm              9826 arch/x86/kvm/x86.c int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm              9832 arch/x86/kvm/x86.c 		return kvm_arch_create_memslot(kvm, memslot,
kvm              9838 arch/x86/kvm/x86.c static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
kvm              9843 arch/x86/kvm/x86.c 		kvm_mmu_slot_remove_write_access(kvm, new);
kvm              9879 arch/x86/kvm/x86.c 			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
kvm              9881 arch/x86/kvm/x86.c 			kvm_mmu_slot_remove_write_access(kvm, new);
kvm              9884 arch/x86/kvm/x86.c 			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
kvm              9888 arch/x86/kvm/x86.c void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm              9894 arch/x86/kvm/x86.c 	if (!kvm->arch.n_requested_mmu_pages)
kvm              9895 arch/x86/kvm/x86.c 		kvm_mmu_change_mmu_pages(kvm,
kvm              9896 arch/x86/kvm/x86.c 				kvm_mmu_calculate_default_mmu_pages(kvm));
kvm              9918 arch/x86/kvm/x86.c 		kvm_mmu_zap_collapsible_sptes(kvm, new);
kvm              9931 arch/x86/kvm/x86.c 		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
kvm              9934 arch/x86/kvm/x86.c void kvm_arch_flush_shadow_all(struct kvm *kvm)
kvm              9936 arch/x86/kvm/x86.c 	kvm_mmu_zap_all(kvm);
kvm              9939 arch/x86/kvm/x86.c void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm              9942 arch/x86/kvm/x86.c 	kvm_page_track_flush_slot(kvm, slot);
kvm              10146 arch/x86/kvm/x86.c 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
kvm              10153 arch/x86/kvm/x86.c 	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
kvm              10177 arch/x86/kvm/x86.c 	if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
kvm              10264 arch/x86/kvm/x86.c void kvm_arch_start_assignment(struct kvm *kvm)
kvm              10266 arch/x86/kvm/x86.c 	atomic_inc(&kvm->arch.assigned_device_count);
kvm              10270 arch/x86/kvm/x86.c void kvm_arch_end_assignment(struct kvm *kvm)
kvm              10272 arch/x86/kvm/x86.c 	atomic_dec(&kvm->arch.assigned_device_count);
kvm              10276 arch/x86/kvm/x86.c bool kvm_arch_has_assigned_device(struct kvm *kvm)
kvm              10278 arch/x86/kvm/x86.c 	return atomic_read(&kvm->arch.assigned_device_count);
kvm              10282 arch/x86/kvm/x86.c void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
kvm              10284 arch/x86/kvm/x86.c 	atomic_inc(&kvm->arch.noncoherent_dma_count);
kvm              10288 arch/x86/kvm/x86.c void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
kvm              10290 arch/x86/kvm/x86.c 	atomic_dec(&kvm->arch.noncoherent_dma_count);
kvm              10294 arch/x86/kvm/x86.c bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
kvm              10296 arch/x86/kvm/x86.c 	return atomic_read(&kvm->arch.noncoherent_dma_count);
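
The assignment and non-coherent-DMA hooks above are bare atomic counters: increment on attach, decrement on detach, and a non-zero read means "present". The same idiom in standalone C11 (names illustrative, not kernel APIs):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Userspace rendition of the atomic_t counter idiom behind
	 * kvm_arch_{start,end}_assignment() / kvm_arch_has_assigned_device(). */
	static atomic_int assigned_device_count = 0;

	static void start_assignment(void)    { atomic_fetch_add(&assigned_device_count, 1); }
	static void end_assignment(void)      { atomic_fetch_sub(&assigned_device_count, 1); }
	static bool has_assigned_device(void) { return atomic_load(&assigned_device_count) != 0; }
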
kvm              10313 arch/x86/kvm/x86.c 	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
kvm              10333 arch/x86/kvm/x86.c 	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
kvm              10339 arch/x86/kvm/x86.c int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
kvm              10342 arch/x86/kvm/x86.c 	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
kvm               189 arch/x86/kvm/x86.h 	u64 gen = kvm_memslots(vcpu->kvm)->generation;
kvm               206 arch/x86/kvm/x86.h 	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
kvm               258 arch/x86/kvm/x86.h static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
kvm               260 arch/x86/kvm/x86.h 	return !(kvm->arch.disabled_quirks & quirk);
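
kvm_check_has_quirk() is an inverted-mask test: the KVM_CAP_DISABLE_QUIRKS handling at x86.c:4822 above stores a mask of disabled quirks, so a quirk stays active while its bit is clear. The logic in isolation:

	#include <stdbool.h>
	#include <stdint.h>

	/* Quirks default to present; userspace disables them by setting bits. */
	static bool check_has_quirk(uint64_t disabled_quirks, uint64_t quirk)
	{
		return !(disabled_quirks & quirk);
	}
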
kvm               267 arch/x86/kvm/x86.h u64 get_kvmclock_ns(struct kvm *kvm);
kvm               328 arch/x86/kvm/x86.h static inline bool kvm_mwait_in_guest(struct kvm *kvm)
kvm               330 arch/x86/kvm/x86.h 	return kvm->arch.mwait_in_guest;
kvm               333 arch/x86/kvm/x86.h static inline bool kvm_hlt_in_guest(struct kvm *kvm)
kvm               335 arch/x86/kvm/x86.h 	return kvm->arch.hlt_in_guest;
kvm               338 arch/x86/kvm/x86.h static inline bool kvm_pause_in_guest(struct kvm *kvm)
kvm               340 arch/x86/kvm/x86.h 	return kvm->arch.pause_in_guest;
kvm               343 arch/x86/kvm/x86.h static inline bool kvm_cstate_in_guest(struct kvm *kvm)
kvm               345 arch/x86/kvm/x86.h 	return kvm->arch.cstate_in_guest;
kvm               218 drivers/gpu/drm/i915/gvt/gvt.h 		struct kvm *kvm;
kvm                92 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm               736 drivers/gpu/drm/i915/gvt/kvmgt.c 		vgpu->vdev.kvm = data;
kvm               838 drivers/gpu/drm/i915/gvt/kvmgt.c 	vgpu->vdev.kvm = NULL;
kvm              1637 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm              1645 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm = info->kvm;
kvm              1647 drivers/gpu/drm/i915/gvt/kvmgt.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              1648 drivers/gpu/drm/i915/gvt/kvmgt.c 	slot = gfn_to_memslot(kvm, gfn);
kvm              1650 drivers/gpu/drm/i915/gvt/kvmgt.c 		srcu_read_unlock(&kvm->srcu, idx);
kvm              1654 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_lock(&kvm->mmu_lock);
kvm              1659 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
kvm              1663 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_unlock(&kvm->mmu_lock);
kvm              1664 drivers/gpu/drm/i915/gvt/kvmgt.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              1671 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm              1679 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm = info->kvm;
kvm              1681 drivers/gpu/drm/i915/gvt/kvmgt.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              1682 drivers/gpu/drm/i915/gvt/kvmgt.c 	slot = gfn_to_memslot(kvm, gfn);
kvm              1684 drivers/gpu/drm/i915/gvt/kvmgt.c 		srcu_read_unlock(&kvm->srcu, idx);
kvm              1688 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_lock(&kvm->mmu_lock);
kvm              1693 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
kvm              1697 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_unlock(&kvm->mmu_lock);
kvm              1698 drivers/gpu/drm/i915/gvt/kvmgt.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              1714 drivers/gpu/drm/i915/gvt/kvmgt.c static void kvmgt_page_track_flush_slot(struct kvm *kvm,
kvm              1723 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_lock(&kvm->mmu_lock);
kvm              1727 drivers/gpu/drm/i915/gvt/kvmgt.c 			kvm_slot_page_track_remove_page(kvm, slot, gfn,
kvm              1732 drivers/gpu/drm/i915/gvt/kvmgt.c 	spin_unlock(&kvm->mmu_lock);
kvm              1735 drivers/gpu/drm/i915/gvt/kvmgt.c static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
kvm              1748 drivers/gpu/drm/i915/gvt/kvmgt.c 		if (kvm && kvm == info->kvm) {
kvm              1762 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm              1768 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm = vgpu->vdev.kvm;
kvm              1769 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (!kvm || kvm->mm != current->mm) {
kvm              1774 drivers/gpu/drm/i915/gvt/kvmgt.c 	if (__kvmgt_vgpu_exist(vgpu, kvm))
kvm              1783 drivers/gpu/drm/i915/gvt/kvmgt.c 	info->kvm = kvm;
kvm              1784 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_get_kvm(info->kvm);
kvm              1793 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_page_track_register_notifier(kvm, &info->track_node);
kvm              1806 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm              1807 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm_put_kvm(info->kvm);
kvm              1877 drivers/gpu/drm/i915/gvt/kvmgt.c 	pfn = gfn_to_pfn(info->kvm, gfn);
kvm              1966 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm              1974 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm = info->kvm;
kvm              1977 drivers/gpu/drm/i915/gvt/kvmgt.c 		if (!mmget_not_zero(kvm->mm))
kvm              1979 drivers/gpu/drm/i915/gvt/kvmgt.c 		use_mm(kvm->mm);
kvm              1982 drivers/gpu/drm/i915/gvt/kvmgt.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              1983 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm              1984 drivers/gpu/drm/i915/gvt/kvmgt.c 		      kvm_read_guest(kvm, gpa, buf, len);
kvm              1985 drivers/gpu/drm/i915/gvt/kvmgt.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm              1988 drivers/gpu/drm/i915/gvt/kvmgt.c 		unuse_mm(kvm->mm);
kvm              1989 drivers/gpu/drm/i915/gvt/kvmgt.c 		mmput(kvm->mm);
kvm              2015 drivers/gpu/drm/i915/gvt/kvmgt.c 	struct kvm *kvm;
kvm              2023 drivers/gpu/drm/i915/gvt/kvmgt.c 	kvm = info->kvm;
kvm              2025 drivers/gpu/drm/i915/gvt/kvmgt.c 	idx = srcu_read_lock(&kvm->srcu);
kvm              2026 drivers/gpu/drm/i915/gvt/kvmgt.c 	ret = kvm_is_visible_gfn(kvm, gfn);
kvm              2027 drivers/gpu/drm/i915/gvt/kvmgt.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               123 drivers/s390/crypto/vfio_ap_ops.c 		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
kvm               207 drivers/s390/crypto/vfio_ap_ops.c 	struct kvm *kvm;
kvm               222 drivers/s390/crypto/vfio_ap_ops.c 	kvm = q->matrix_mdev->kvm;
kvm               223 drivers/s390/crypto/vfio_ap_ops.c 	gisa = kvm->arch.gisa_int.origin;
kvm               227 drivers/s390/crypto/vfio_ap_ops.c 	aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc);
kvm               242 drivers/s390/crypto/vfio_ap_ops.c 		kvm_s390_gisc_unregister(kvm, isc);
kvm               291 drivers/s390/crypto/vfio_ap_ops.c 	if (!vcpu->kvm->arch.crypto.pqap_hook)
kvm               293 drivers/s390/crypto/vfio_ap_ops.c 	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
kvm               353 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               609 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               675 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               756 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               817 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               861 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm               911 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm)
kvm              1036 drivers/s390/crypto/vfio_ap_ops.c 				struct kvm *kvm)
kvm              1043 drivers/s390/crypto/vfio_ap_ops.c 		if ((m != matrix_mdev) && (m->kvm == kvm)) {
kvm              1049 drivers/s390/crypto/vfio_ap_ops.c 	matrix_mdev->kvm = kvm;
kvm              1050 drivers/s390/crypto/vfio_ap_ops.c 	kvm_get_kvm(kvm);
kvm              1051 drivers/s390/crypto/vfio_ap_ops.c 	kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
kvm              1098 drivers/s390/crypto/vfio_ap_ops.c 		matrix_mdev->kvm = NULL;
kvm              1107 drivers/s390/crypto/vfio_ap_ops.c 	if (!matrix_mdev->kvm->arch.crypto.crycbd)
kvm              1110 drivers/s390/crypto/vfio_ap_ops.c 	kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
kvm              1225 drivers/s390/crypto/vfio_ap_ops.c 	if (matrix_mdev->kvm) {
kvm              1226 drivers/s390/crypto/vfio_ap_ops.c 		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
kvm              1227 drivers/s390/crypto/vfio_ap_ops.c 		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
kvm              1229 drivers/s390/crypto/vfio_ap_ops.c 		kvm_put_kvm(matrix_mdev->kvm);
kvm              1230 drivers/s390/crypto/vfio_ap_ops.c 		matrix_mdev->kvm = NULL;
kvm                86 drivers/s390/crypto/vfio_ap_private.h 	struct kvm *kvm;
kvm               197 drivers/vfio/pci/vfio_pci_nvlink2.c 	struct kvm *kvm = opaque;
kvm               202 drivers/vfio/pci/vfio_pci_nvlink2.c 	if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
kvm               204 drivers/vfio/pci/vfio_pci_nvlink2.c 				kvm->arch.lpid, MSR_DR | MSR_PR))
kvm                88 drivers/vfio/vfio.c 	struct kvm			*kvm;
kvm              2046 drivers/vfio/vfio.c void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
kvm              2048 drivers/vfio/vfio.c 	group->kvm = kvm;
kvm              2050 drivers/vfio/vfio.c 				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
kvm              2081 drivers/vfio/vfio.c 	if (!ret && set_kvm && group->kvm)
kvm              2083 drivers/vfio/vfio.c 					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
kvm                23 include/kvm/arm_psci.h static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
kvm                33 include/kvm/arm_psci.h 		if (vcpu->kvm->arch.psci_version)
kvm                34 include/kvm/arm_psci.h 			return vcpu->kvm->arch.psci_version;
kvm               334 include/kvm/arm_vgic.h int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
kvm               335 include/kvm/arm_vgic.h void kvm_vgic_early_init(struct kvm *kvm);
kvm               337 include/kvm/arm_vgic.h int kvm_vgic_create(struct kvm *kvm, u32 type);
kvm               338 include/kvm/arm_vgic.h void kvm_vgic_destroy(struct kvm *kvm);
kvm               340 include/kvm/arm_vgic.h int kvm_vgic_map_resources(struct kvm *kvm);
kvm               344 include/kvm/arm_vgic.h int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
kvm               381 include/kvm/arm_vgic.h int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
kvm               387 include/kvm/arm_vgic.h int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
kvm               393 include/kvm/arm_vgic.h int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
kvm               396 include/kvm/arm_vgic.h int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
kvm               193 include/linux/kvm_host.h int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
kvm               195 include/linux/kvm_host.h void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
kvm               197 include/linux/kvm_host.h struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
kvm               264 include/linux/kvm_host.h 	struct kvm *kvm;
kvm               382 include/linux/kvm_host.h 		   struct kvm *kvm, int irq_source_id, int level,
kvm               532 include/linux/kvm_host.h static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
kvm               534 include/linux/kvm_host.h 	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
kvm               535 include/linux/kvm_host.h 				      lockdep_is_held(&kvm->slots_lock) ||
kvm               536 include/linux/kvm_host.h 				      !refcount_read(&kvm->users_count));
kvm               539 include/linux/kvm_host.h static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
kvm               541 include/linux/kvm_host.h 	int num_vcpus = atomic_read(&kvm->online_vcpus);
kvm               546 include/linux/kvm_host.h 	return kvm->vcpus[i];
kvm               549 include/linux/kvm_host.h #define kvm_for_each_vcpu(idx, vcpup, kvm) \
kvm               551 include/linux/kvm_host.h 	     idx < atomic_read(&kvm->online_vcpus) && \
kvm               552 include/linux/kvm_host.h 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
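
kvm_for_each_vcpu() walks vcpus[0..online_vcpus) through kvm_get_vcpu(), re-reading online_vcpus on every iteration. A kernel-style usage sketch; demo_kick_all() is hypothetical, but the loop shape matches kvm_flush_pml_buffers() and kvm_gen_update_masterclock() earlier in this index:

	/* Hypothetical helper showing the per-vCPU iteration idiom. */
	static void demo_kick_all(struct kvm *kvm)
	{
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_vcpu_kick(vcpu);	/* IPI each online vCPU */
	}
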
kvm               555 include/linux/kvm_host.h static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
kvm               563 include/linux/kvm_host.h 		vcpu = kvm_get_vcpu(kvm, id);
kvm               566 include/linux/kvm_host.h 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               577 include/linux/kvm_host.h 	kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
kvm               588 include/linux/kvm_host.h int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
kvm               595 include/linux/kvm_host.h void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
kvm               596 include/linux/kvm_host.h void kvm_arch_post_irq_routing_update(struct kvm *kvm);
kvm               598 include/linux/kvm_host.h static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
kvm               601 include/linux/kvm_host.h static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
kvm               623 include/linux/kvm_host.h void kvm_get_kvm(struct kvm *kvm);
kvm               624 include/linux/kvm_host.h void kvm_put_kvm(struct kvm *kvm);
kvm               626 include/linux/kvm_host.h static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
kvm               629 include/linux/kvm_host.h 	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
kvm               630 include/linux/kvm_host.h 			lockdep_is_held(&kvm->slots_lock) ||
kvm               631 include/linux/kvm_host.h 			!refcount_read(&kvm->users_count));
kvm               634 include/linux/kvm_host.h static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
kvm               636 include/linux/kvm_host.h 	return __kvm_memslots(kvm, 0);
kvm               643 include/linux/kvm_host.h 	return __kvm_memslots(vcpu->kvm, as_id);
kvm               676 include/linux/kvm_host.h int kvm_set_memory_region(struct kvm *kvm,
kvm               678 include/linux/kvm_host.h int __kvm_set_memory_region(struct kvm *kvm,
kvm               680 include/linux/kvm_host.h void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm               682 include/linux/kvm_host.h int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm               684 include/linux/kvm_host.h void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
kvm               685 include/linux/kvm_host.h int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm               689 include/linux/kvm_host.h void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm               697 include/linux/kvm_host.h void kvm_arch_flush_shadow_all(struct kvm *kvm);
kvm               699 include/linux/kvm_host.h void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm               705 include/linux/kvm_host.h struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
kvm               706 include/linux/kvm_host.h unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
kvm               707 include/linux/kvm_host.h unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
kvm               715 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm               716 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm               717 include/linux/kvm_host.h kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
kvm               732 include/linux/kvm_host.h int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
kvm               734 include/linux/kvm_host.h int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
kvm               736 include/linux/kvm_host.h int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
kvm               737 include/linux/kvm_host.h int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm               739 include/linux/kvm_host.h int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
kvm               741 include/linux/kvm_host.h int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
kvm               743 include/linux/kvm_host.h int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm               745 include/linux/kvm_host.h int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm               748 include/linux/kvm_host.h int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm               750 include/linux/kvm_host.h int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
kvm               751 include/linux/kvm_host.h int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
kvm               752 include/linux/kvm_host.h struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
kvm               753 include/linux/kvm_host.h bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
kvm               755 include/linux/kvm_host.h void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
kvm               793 include/linux/kvm_host.h void kvm_flush_remote_tlbs(struct kvm *kvm);
kvm               794 include/linux/kvm_host.h void kvm_reload_remote_mmus(struct kvm *kvm);
kvm               796 include/linux/kvm_host.h bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
kvm               798 include/linux/kvm_host.h bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
kvm               806 include/linux/kvm_host.h int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
kvm               808 include/linux/kvm_host.h int kvm_get_dirty_log(struct kvm *kvm,
kvm               811 include/linux/kvm_host.h int kvm_get_dirty_log_protect(struct kvm *kvm,
kvm               813 include/linux/kvm_host.h int kvm_clear_dirty_log_protect(struct kvm *kvm,
kvm               816 include/linux/kvm_host.h void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm               821 include/linux/kvm_host.h int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
kvm               823 include/linux/kvm_host.h int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
kvm               826 include/linux/kvm_host.h int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
kvm               828 include/linux/kvm_host.h int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm               864 include/linux/kvm_host.h struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
kvm               888 include/linux/kvm_host.h static inline struct kvm *kvm_arch_alloc_vm(void)
kvm               890 include/linux/kvm_host.h 	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
kvm               893 include/linux/kvm_host.h static inline void kvm_arch_free_vm(struct kvm *kvm)
kvm               895 include/linux/kvm_host.h 	kfree(kvm);
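
These are the generic fallbacks; an architecture that embeds struct kvm in a larger container overrides both, as vmx_vm_alloc()/vmx_vm_free() do above (in the tree, the generic inlines are compiled out when an arch supplies its own). A sketch of such an override; struct demo_vm is illustrative:

	/* Sketch of an arch override pairing with to_kvm_vmx() above:
	 * allocate the wrapper, hand back the embedded struct kvm, and
	 * free via the wrapper. struct demo_vm is illustrative. */
	struct demo_vm {
		struct kvm kvm;		/* recoverable via container_of() */
		unsigned long arch_private;
	};

	struct kvm *kvm_arch_alloc_vm(void)
	{
		struct demo_vm *v = vzalloc(sizeof(*v));

		return v ? &v->kvm : NULL;
	}

	void kvm_arch_free_vm(struct kvm *kvm)
	{
		vfree(container_of(kvm, struct demo_vm, kvm));
	}
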
kvm               900 include/linux/kvm_host.h static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
kvm               907 include/linux/kvm_host.h void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
kvm               908 include/linux/kvm_host.h void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
kvm               909 include/linux/kvm_host.h bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
kvm               911 include/linux/kvm_host.h static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
kvm               915 include/linux/kvm_host.h static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
kvm               919 include/linux/kvm_host.h static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
kvm               925 include/linux/kvm_host.h void kvm_arch_start_assignment(struct kvm *kvm);
kvm               926 include/linux/kvm_host.h void kvm_arch_end_assignment(struct kvm *kvm);
kvm               927 include/linux/kvm_host.h bool kvm_arch_has_assigned_device(struct kvm *kvm);
kvm               929 include/linux/kvm_host.h static inline void kvm_arch_start_assignment(struct kvm *kvm)
kvm               933 include/linux/kvm_host.h static inline void kvm_arch_end_assignment(struct kvm *kvm)
kvm               937 include/linux/kvm_host.h static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
kvm               958 include/linux/kvm_host.h bool kvm_arch_intc_initialized(struct kvm *kvm);
kvm               960 include/linux/kvm_host.h static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
kvm               966 include/linux/kvm_host.h int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
kvm               967 include/linux/kvm_host.h void kvm_arch_destroy_vm(struct kvm *kvm);
kvm               968 include/linux/kvm_host.h void kvm_arch_sync_events(struct kvm *kvm);
kvm               982 include/linux/kvm_host.h int kvm_irq_map_gsi(struct kvm *kvm,
kvm               984 include/linux/kvm_host.h int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
kvm               986 include/linux/kvm_host.h int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
kvm               988 include/linux/kvm_host.h int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
kvm               991 include/linux/kvm_host.h 			       struct kvm *kvm, int irq_source_id,
kvm               993 include/linux/kvm_host.h bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
kvm               994 include/linux/kvm_host.h void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
kvm               995 include/linux/kvm_host.h void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
kvm               996 include/linux/kvm_host.h void kvm_register_irq_ack_notifier(struct kvm *kvm,
kvm               998 include/linux/kvm_host.h void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
kvm              1000 include/linux/kvm_host.h int kvm_request_irq_source_id(struct kvm *kvm);
kvm              1001 include/linux/kvm_host.h void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
kvm              1002 include/linux/kvm_host.h bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
kvm              1051 include/linux/kvm_host.h static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
kvm              1053 include/linux/kvm_host.h 	return gfn_to_memslot(kvm, gfn)->id;
kvm              1085 include/linux/kvm_host.h static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
kvm              1087 include/linux/kvm_host.h 	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
kvm              1100 include/linux/kvm_host.h 	struct kvm *kvm;
kvm              1113 include/linux/kvm_host.h static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
kvm              1115 include/linux/kvm_host.h 	if (unlikely(kvm->mmu_notifier_count))
kvm              1128 include/linux/kvm_host.h 	if (kvm->mmu_notifier_seq != mmu_seq)
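mmu_notifier_retry() above is one half of a sequence-count handshake with the MMU notifiers; the arm page-fault path indexed later (virt/kvm/arm/mmu.c:1730 and :1773) shows the other half. A condensed sketch of the canonical pattern, with demo_fault_path as a hypothetical caller:

    static int demo_fault_path(struct kvm_vcpu *vcpu, gfn_t gfn, bool write_fault)
    {
    	struct kvm *kvm = vcpu->kvm;
    	unsigned long mmu_seq;
    	kvm_pfn_t pfn;
    	bool writable;

    	/* snapshot the sequence before translating hva -> pfn */
    	mmu_seq = kvm->mmu_notifier_seq;
    	smp_rmb();

    	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
    	if (is_error_pfn(pfn))
    		return -EFAULT;

    	spin_lock(&kvm->mmu_lock);
    	if (mmu_notifier_retry(kvm, mmu_seq)) {
    		/* an invalidation raced with us: back out, let the guest refault */
    		spin_unlock(&kvm->mmu_lock);
    		kvm_release_pfn_clean(pfn);
    		return 0;
    	}
    	/* ... install the stage 2 / shadow mapping here ... */
    	spin_unlock(&kvm->mmu_lock);
    	kvm_release_pfn_clean(pfn);
    	return 0;
    }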
kvm              1138 include/linux/kvm_host.h bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
kvm              1139 include/linux/kvm_host.h int kvm_set_irq_routing(struct kvm *kvm,
kvm              1143 include/linux/kvm_host.h int kvm_set_routing_entry(struct kvm *kvm,
kvm              1146 include/linux/kvm_host.h void kvm_free_irq_routing(struct kvm *kvm);
kvm              1150 include/linux/kvm_host.h static inline void kvm_free_irq_routing(struct kvm *kvm) {}
kvm              1154 include/linux/kvm_host.h int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
kvm              1158 include/linux/kvm_host.h void kvm_eventfd_init(struct kvm *kvm);
kvm              1159 include/linux/kvm_host.h int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
kvm              1162 include/linux/kvm_host.h int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
kvm              1163 include/linux/kvm_host.h void kvm_irqfd_release(struct kvm *kvm);
kvm              1164 include/linux/kvm_host.h void kvm_irq_routing_update(struct kvm *);
kvm              1166 include/linux/kvm_host.h static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
kvm              1171 include/linux/kvm_host.h static inline void kvm_irqfd_release(struct kvm *kvm) {}
kvm              1176 include/linux/kvm_host.h static inline void kvm_eventfd_init(struct kvm *kvm) {}
kvm              1178 include/linux/kvm_host.h static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
kvm              1183 include/linux/kvm_host.h static inline void kvm_irqfd_release(struct kvm *kvm) {}
kvm              1186 include/linux/kvm_host.h static inline void kvm_irq_routing_update(struct kvm *kvm)
kvm              1191 include/linux/kvm_host.h static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm              1198 include/linux/kvm_host.h void kvm_arch_irq_routing_update(struct kvm *kvm);
kvm              1250 include/linux/kvm_host.h 	struct kvm *kvm;
kvm              1339 include/linux/kvm_host.h int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
kvm              1379 include/linux/kvm_host.h void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm              1391 include/linux/kvm_host.h typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
kvm              1393 include/linux/kvm_host.h int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
kvm                24 include/linux/kvm_irqfd.h 	struct kvm *kvm;
kvm                41 include/linux/kvm_irqfd.h 	struct kvm *kvm;
kvm                 6 include/linux/kvm_types.h struct kvm;
kvm               130 include/linux/vfio.h struct kvm;
kvm               131 include/linux/vfio.h extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);
kvm                 8 include/trace/events/kvm.h #define TRACE_SYSTEM kvm
kvm                77 tools/perf/arch/powerpc/util/kvm-stat.c static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
kvm               123 tools/perf/arch/powerpc/util/kvm-stat.c static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
kvm               142 tools/perf/arch/powerpc/util/kvm-stat.c 	kvm->exit_reasons = hv_exit_reasons;
kvm               143 tools/perf/arch/powerpc/util/kvm-stat.c 	kvm->exit_reasons_isa = "HV";
kvm               149 tools/perf/arch/powerpc/util/kvm-stat.c static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
kvm               157 tools/perf/arch/powerpc/util/kvm-stat.c 	return ppc__setup_book3s_hv(kvm, evlist);
kvm               160 tools/perf/arch/powerpc/util/kvm-stat.c int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
kvm               162 tools/perf/arch/powerpc/util/kvm-stat.c 	return ppc__setup_kvm_tp(kvm);
kvm               165 tools/perf/arch/powerpc/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
kvm               169 tools/perf/arch/powerpc/util/kvm-stat.c 	ret = ppc__setup_kvm_tp(kvm);
kvm               171 tools/perf/arch/powerpc/util/kvm-stat.c 		kvm->exit_reasons = NULL;
kvm               172 tools/perf/arch/powerpc/util/kvm-stat.c 		kvm->exit_reasons_isa = NULL;
kvm               102 tools/perf/arch/s390/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
kvm               105 tools/perf/arch/s390/util/kvm-stat.c 		kvm->exit_reasons = sie_exit_reasons;
kvm               106 tools/perf/arch/s390/util/kvm-stat.c 		kvm->exit_reasons_isa = "SIE";
kvm                76 tools/perf/arch/x86/util/kvm-stat.c static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
kvm               120 tools/perf/arch/x86/util/kvm-stat.c static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
kvm               156 tools/perf/arch/x86/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
kvm               159 tools/perf/arch/x86/util/kvm-stat.c 		kvm->exit_reasons = vmx_exit_reasons;
kvm               160 tools/perf/arch/x86/util/kvm-stat.c 		kvm->exit_reasons_isa = "VMX";
kvm               162 tools/perf/arch/x86/util/kvm-stat.c 		kvm->exit_reasons = svm_exit_reasons;
kvm               163 tools/perf/arch/x86/util/kvm-stat.c 		kvm->exit_reasons_isa = "SVM";
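Each perf arch supplies its own cpu_isa_init() to pick an exit-reason table, as the powerpc, s390 and x86 entries above show. The shape of such a hook, sketched for a made-up "Foo" ISA (foo_exit_reasons and the codes in it are placeholders, and the two-field table layout is assumed from util/kvm-stat.h):

    static struct exit_reasons_table foo_exit_reasons[] = {
    	{ 0x00, "HALT" },	/* hypothetical exit codes */
    	{ 0x01, "MMIO" },
    };

    int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
    {
    	if (!strstr(cpuid, "Foo"))
    		return -ENOTSUP;	/* unknown CPU: leave the table unset */

    	kvm->exit_reasons = foo_exit_reasons;
    	kvm->exit_reasons_isa = "Foo";
    	return 0;
    }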
kvm               102 tools/perf/builtin-kvm.c static const char *get_exit_reason(struct perf_kvm_stat *kvm,
kvm               113 tools/perf/builtin-kvm.c 		(unsigned long long)exit_code, kvm->exit_reasons_isa);
kvm               117 tools/perf/builtin-kvm.c void exit_event_decode_key(struct perf_kvm_stat *kvm,
kvm               121 tools/perf/builtin-kvm.c 	const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
kvm               127 tools/perf/builtin-kvm.c static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
kvm               132 tools/perf/builtin-kvm.c 		if (!strcmp(events_ops->name, kvm->report_event)) {
kvm               133 tools/perf/builtin-kvm.c 			kvm->events_ops = events_ops->ops;
kvm               148 tools/perf/builtin-kvm.c static void init_kvm_event_record(struct perf_kvm_stat *kvm)
kvm               153 tools/perf/builtin-kvm.c 		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
kvm               225 tools/perf/builtin-kvm.c static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
kvm               233 tools/perf/builtin-kvm.c 	head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
kvm               247 tools/perf/builtin-kvm.c static bool handle_begin_event(struct perf_kvm_stat *kvm,
kvm               254 tools/perf/builtin-kvm.c 		event = find_create_kvm_event(kvm, key);
kvm               294 tools/perf/builtin-kvm.c static bool is_child_event(struct perf_kvm_stat *kvm,
kvm               301 tools/perf/builtin-kvm.c 	child_ops = kvm->events_ops->child_ops;
kvm               316 tools/perf/builtin-kvm.c static bool handle_child_event(struct perf_kvm_stat *kvm,
kvm               324 tools/perf/builtin-kvm.c 		event = find_create_kvm_event(kvm, key);
kvm               342 tools/perf/builtin-kvm.c static bool handle_end_event(struct perf_kvm_stat *kvm,
kvm               351 tools/perf/builtin-kvm.c 	if (kvm->trace_vcpu == -1)
kvm               373 tools/perf/builtin-kvm.c 		event = find_create_kvm_event(kvm, key);
kvm               389 tools/perf/builtin-kvm.c 	if (kvm->duration && time_diff > kvm->duration) {
kvm               392 tools/perf/builtin-kvm.c 		kvm->events_ops->decode_key(kvm, &event->key, decode);
kvm               426 tools/perf/builtin-kvm.c static bool handle_kvm_event(struct perf_kvm_stat *kvm,
kvm               433 tools/perf/builtin-kvm.c 				 .exit_reasons = kvm->exit_reasons };
kvm               440 tools/perf/builtin-kvm.c 	if ((kvm->trace_vcpu != -1) &&
kvm               441 tools/perf/builtin-kvm.c 	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
kvm               444 tools/perf/builtin-kvm.c 	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
kvm               445 tools/perf/builtin-kvm.c 		return handle_begin_event(kvm, vcpu_record, &key, sample->time);
kvm               447 tools/perf/builtin-kvm.c 	if (is_child_event(kvm, evsel, sample, &key))
kvm               448 tools/perf/builtin-kvm.c 		return handle_child_event(kvm, vcpu_record, &key, sample);
kvm               450 tools/perf/builtin-kvm.c 	if (kvm->events_ops->is_end_event(evsel, sample, &key))
kvm               451 tools/perf/builtin-kvm.c 		return handle_end_event(kvm, vcpu_record, &key, sample);
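handle_kvm_event() above dispatches each sample through the selected kvm_events_ops. For the default vmexit report those callbacks pair kvm:kvm_exit (begin) with kvm:kvm_entry (end), keyed by exit code. A sketch of the ops bundle, reconstructed from the declarations in tools/perf/util/kvm-stat.h (field names assumed from the call sites above):

    static struct kvm_events_ops demo_exit_events = {
    	.is_begin_event = exit_event_begin,	 /* fires on kvm:kvm_exit */
    	.is_end_event   = exit_event_end,	 /* fires on kvm:kvm_entry */
    	.decode_key     = exit_event_decode_key, /* indexed above */
    	.name           = "VM-EXIT",
    };

register_kvm_events_ops() then matches kvm->report_event against such a name and installs the ops.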
kvm               492 tools/perf/builtin-kvm.c static bool select_key(struct perf_kvm_stat *kvm)
kvm               497 tools/perf/builtin-kvm.c 		if (!strcmp(keys[i].name, kvm->sort_key)) {
kvm               498 tools/perf/builtin-kvm.c 			kvm->compare = keys[i].key;
kvm               503 tools/perf/builtin-kvm.c 	pr_err("Unknown compare key:%s\n", kvm->sort_key);
kvm               529 tools/perf/builtin-kvm.c update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
kvm               531 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
kvm               533 tools/perf/builtin-kvm.c 	kvm->total_count += get_event_count(event, vcpu);
kvm               534 tools/perf/builtin-kvm.c 	kvm->total_time += get_event_time(event, vcpu);
kvm               542 tools/perf/builtin-kvm.c static void sort_result(struct perf_kvm_stat *kvm)
kvm               545 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
kvm               549 tools/perf/builtin-kvm.c 		list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
kvm               551 tools/perf/builtin-kvm.c 				update_total_count(kvm, event);
kvm               552 tools/perf/builtin-kvm.c 				insert_to_result(&kvm->result, event,
kvm               553 tools/perf/builtin-kvm.c 						 kvm->compare, vcpu);
kvm               571 tools/perf/builtin-kvm.c static void print_vcpu_info(struct perf_kvm_stat *kvm)
kvm               573 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
kvm               577 tools/perf/builtin-kvm.c 	if (kvm->opts.target.system_wide)
kvm               579 tools/perf/builtin-kvm.c 	else if (kvm->opts.target.pid)
kvm               580 tools/perf/builtin-kvm.c 		pr_info("pid(s) %s, ", kvm->opts.target.pid);
kvm               606 tools/perf/builtin-kvm.c static void print_result(struct perf_kvm_stat *kvm)
kvm               610 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
kvm               612 tools/perf/builtin-kvm.c 	if (kvm->live) {
kvm               618 tools/perf/builtin-kvm.c 	print_vcpu_info(kvm);
kvm               619 tools/perf/builtin-kvm.c 	pr_info("%*s ", decode_str_len, kvm->events_ops->name);
kvm               629 tools/perf/builtin-kvm.c 	while ((event = pop_from_result(&kvm->result))) {
kvm               637 tools/perf/builtin-kvm.c 		kvm->events_ops->decode_key(kvm, &event->key, decode);
kvm               640 tools/perf/builtin-kvm.c 		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
kvm               641 tools/perf/builtin-kvm.c 		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
kvm               650 tools/perf/builtin-kvm.c 		kvm->total_count, kvm->total_time / (double)NSEC_PER_USEC);
kvm               652 tools/perf/builtin-kvm.c 	if (kvm->lost_events)
kvm               653 tools/perf/builtin-kvm.c 		pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
kvm               662 tools/perf/builtin-kvm.c 	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
kvm               664 tools/perf/builtin-kvm.c 	kvm->lost_events++;
kvm               669 tools/perf/builtin-kvm.c static bool skip_sample(struct perf_kvm_stat *kvm,
kvm               672 tools/perf/builtin-kvm.c 	if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
kvm               686 tools/perf/builtin-kvm.c 	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
kvm               689 tools/perf/builtin-kvm.c 	if (skip_sample(kvm, sample))
kvm               699 tools/perf/builtin-kvm.c 	if (!handle_kvm_event(kvm, thread, evsel, sample))
kvm               706 tools/perf/builtin-kvm.c static int cpu_isa_config(struct perf_kvm_stat *kvm)
kvm               711 tools/perf/builtin-kvm.c 	if (kvm->live) {
kvm               720 tools/perf/builtin-kvm.c 		cpuid = kvm->session->header.env.cpuid;
kvm               727 tools/perf/builtin-kvm.c 	err = cpu_isa_init(kvm, cpuid);
kvm               750 tools/perf/builtin-kvm.c static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
kvm               753 tools/perf/builtin-kvm.c 	struct evlist *evlist = kvm->evlist;
kvm               774 tools/perf/builtin-kvm.c 		err = perf_session__queue_event(kvm->session, event, timestamp, 0);
kvm               800 tools/perf/builtin-kvm.c static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
kvm               806 tools/perf/builtin-kvm.c 	for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
kvm               807 tools/perf/builtin-kvm.c 		n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
kvm               828 tools/perf/builtin-kvm.c 		struct ordered_events *oe = &kvm->session->ordered_events;
kvm               833 tools/perf/builtin-kvm.c 			if (kvm->lost_events)
kvm               835 tools/perf/builtin-kvm.c 					kvm->lost_events);
kvm               850 tools/perf/builtin-kvm.c static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
kvm               855 tools/perf/builtin-kvm.c 	kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
kvm               856 tools/perf/builtin-kvm.c 	if (kvm->timerfd < 0) {
kvm               861 tools/perf/builtin-kvm.c 	new_value.it_value.tv_sec = kvm->display_time;
kvm               863 tools/perf/builtin-kvm.c 	new_value.it_interval.tv_sec = kvm->display_time;
kvm               866 tools/perf/builtin-kvm.c 	if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
kvm               868 tools/perf/builtin-kvm.c 		close(kvm->timerfd);
kvm               877 tools/perf/builtin-kvm.c static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
kvm               882 tools/perf/builtin-kvm.c 	rc = read(kvm->timerfd, &c, sizeof(uint64_t));
kvm               900 tools/perf/builtin-kvm.c 	sort_result(kvm);
kvm               901 tools/perf/builtin-kvm.c 	print_result(kvm);
kvm               904 tools/perf/builtin-kvm.c 	clear_events_cache_stats(kvm->kvm_events_cache);
kvm               905 tools/perf/builtin-kvm.c 	kvm->total_count = 0;
kvm               906 tools/perf/builtin-kvm.c 	kvm->total_time = 0;
kvm               907 tools/perf/builtin-kvm.c 	kvm->lost_events = 0;
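perf_kvm__timerfd_create() and perf_kvm__handle_timerfd() above drive the live display off a periodic timerfd. A self-contained sketch of the same POSIX pattern (demo_timerfd is illustrative only): read() on the fd yields a u64 count of expirations since the last read, so a value greater than one means display intervals were missed.

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <unistd.h>

    static int demo_timerfd(unsigned int seconds)
    {
    	struct itimerspec its = {
    		.it_value.tv_sec    = seconds,	/* first expiry */
    		.it_interval.tv_sec = seconds,	/* then periodic */
    	};
    	int fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);

    	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) != 0)
    		return -1;
    	return fd;	/* poll it, then read(fd, &exp, sizeof(uint64_t)) */
    }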
kvm               941 tools/perf/builtin-kvm.c static int kvm_events_live_report(struct perf_kvm_stat *kvm)
kvm               947 tools/perf/builtin-kvm.c 	kvm->live = true;
kvm               949 tools/perf/builtin-kvm.c 	ret = cpu_isa_config(kvm);
kvm               953 tools/perf/builtin-kvm.c 	if (!verify_vcpu(kvm->trace_vcpu) ||
kvm               954 tools/perf/builtin-kvm.c 	    !select_key(kvm) ||
kvm               955 tools/perf/builtin-kvm.c 	    !register_kvm_events_ops(kvm)) {
kvm               960 tools/perf/builtin-kvm.c 	init_kvm_event_record(kvm);
kvm               966 tools/perf/builtin-kvm.c 	if (perf_kvm__timerfd_create(kvm) < 0) {
kvm               971 tools/perf/builtin-kvm.c 	if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0)
kvm               974 tools/perf/builtin-kvm.c 	nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin));
kvm               982 tools/perf/builtin-kvm.c 	evlist__enable(kvm->evlist);
kvm               985 tools/perf/builtin-kvm.c 		struct fdarray *fda = &kvm->evlist->core.pollfd;
kvm               988 tools/perf/builtin-kvm.c 		rc = perf_kvm__mmap_read(kvm);
kvm               992 tools/perf/builtin-kvm.c 		err = perf_kvm__handle_timerfd(kvm);
kvm              1003 tools/perf/builtin-kvm.c 	evlist__disable(kvm->evlist);
kvm              1006 tools/perf/builtin-kvm.c 		sort_result(kvm);
kvm              1007 tools/perf/builtin-kvm.c 		print_result(kvm);
kvm              1011 tools/perf/builtin-kvm.c 	if (kvm->timerfd >= 0)
kvm              1012 tools/perf/builtin-kvm.c 		close(kvm->timerfd);
kvm              1018 tools/perf/builtin-kvm.c static int kvm_live_open_events(struct perf_kvm_stat *kvm)
kvm              1022 tools/perf/builtin-kvm.c 	struct evlist *evlist = kvm->evlist;
kvm              1025 tools/perf/builtin-kvm.c 	perf_evlist__config(evlist, &kvm->opts, NULL);
kvm              1065 tools/perf/builtin-kvm.c 	if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
kvm              1079 tools/perf/builtin-kvm.c static int read_events(struct perf_kvm_stat *kvm)
kvm              1090 tools/perf/builtin-kvm.c 		.path  = kvm->file_name,
kvm              1092 tools/perf/builtin-kvm.c 		.force = kvm->force,
kvm              1095 tools/perf/builtin-kvm.c 	kvm->tool = eops;
kvm              1096 tools/perf/builtin-kvm.c 	kvm->session = perf_session__new(&file, false, &kvm->tool);
kvm              1097 tools/perf/builtin-kvm.c 	if (IS_ERR(kvm->session)) {
kvm              1099 tools/perf/builtin-kvm.c 		return PTR_ERR(kvm->session);
kvm              1102 tools/perf/builtin-kvm.c 	symbol__init(&kvm->session->header.env);
kvm              1104 tools/perf/builtin-kvm.c 	if (!perf_session__has_traces(kvm->session, "kvm record")) {
kvm              1113 tools/perf/builtin-kvm.c 	ret = cpu_isa_config(kvm);
kvm              1117 tools/perf/builtin-kvm.c 	ret = perf_session__process_events(kvm->session);
kvm              1120 tools/perf/builtin-kvm.c 	perf_session__delete(kvm->session);
kvm              1124 tools/perf/builtin-kvm.c static int parse_target_str(struct perf_kvm_stat *kvm)
kvm              1126 tools/perf/builtin-kvm.c 	if (kvm->opts.target.pid) {
kvm              1127 tools/perf/builtin-kvm.c 		kvm->pid_list = intlist__new(kvm->opts.target.pid);
kvm              1128 tools/perf/builtin-kvm.c 		if (kvm->pid_list == NULL) {
kvm              1137 tools/perf/builtin-kvm.c static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
kvm              1140 tools/perf/builtin-kvm.c 	int vcpu = kvm->trace_vcpu;
kvm              1142 tools/perf/builtin-kvm.c 	if (parse_target_str(kvm) != 0)
kvm              1148 tools/perf/builtin-kvm.c 	if (!select_key(kvm))
kvm              1151 tools/perf/builtin-kvm.c 	if (!register_kvm_events_ops(kvm))
kvm              1154 tools/perf/builtin-kvm.c 	init_kvm_event_record(kvm);
kvm              1157 tools/perf/builtin-kvm.c 	ret = read_events(kvm);
kvm              1161 tools/perf/builtin-kvm.c 	sort_result(kvm);
kvm              1162 tools/perf/builtin-kvm.c 	print_result(kvm);
kvm              1176 tools/perf/builtin-kvm.c int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
kvm              1182 tools/perf/builtin-kvm.c kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
kvm              1200 tools/perf/builtin-kvm.c 	ret = setup_kvm_events_tp(kvm);
kvm              1225 tools/perf/builtin-kvm.c 	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
kvm              1255 tools/perf/builtin-kvm.c kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
kvm              1258 tools/perf/builtin-kvm.c 		OPT_STRING(0, "event", &kvm->report_event, "report event",
kvm              1261 tools/perf/builtin-kvm.c 		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
kvm              1263 tools/perf/builtin-kvm.c 		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
kvm              1266 tools/perf/builtin-kvm.c 		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
kvm              1268 tools/perf/builtin-kvm.c 		OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
kvm              1286 tools/perf/builtin-kvm.c 	if (!kvm->opts.target.pid)
kvm              1287 tools/perf/builtin-kvm.c 		kvm->opts.target.system_wide = true;
kvm              1289 tools/perf/builtin-kvm.c 	return kvm_events_report_vcpu(kvm);
kvm              1342 tools/perf/builtin-kvm.c static int kvm_events_live(struct perf_kvm_stat *kvm,
kvm              1349 tools/perf/builtin-kvm.c 		OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
kvm              1351 tools/perf/builtin-kvm.c 		OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
kvm              1356 tools/perf/builtin-kvm.c 		OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
kvm              1358 tools/perf/builtin-kvm.c 		OPT_UINTEGER('d', "display", &kvm->display_time,
kvm              1360 tools/perf/builtin-kvm.c 		OPT_STRING(0, "event", &kvm->report_event, "report event",
kvm              1363 tools/perf/builtin-kvm.c 		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
kvm              1365 tools/perf/builtin-kvm.c 		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
kvm              1368 tools/perf/builtin-kvm.c 		OPT_U64(0, "duration", &kvm->duration,
kvm              1386 tools/perf/builtin-kvm.c 	kvm->tool.sample = process_sample_event;
kvm              1387 tools/perf/builtin-kvm.c 	kvm->tool.comm   = perf_event__process_comm;
kvm              1388 tools/perf/builtin-kvm.c 	kvm->tool.exit   = perf_event__process_exit;
kvm              1389 tools/perf/builtin-kvm.c 	kvm->tool.fork   = perf_event__process_fork;
kvm              1390 tools/perf/builtin-kvm.c 	kvm->tool.lost   = process_lost_event;
kvm              1391 tools/perf/builtin-kvm.c 	kvm->tool.namespaces  = perf_event__process_namespaces;
kvm              1392 tools/perf/builtin-kvm.c 	kvm->tool.ordered_events = true;
kvm              1393 tools/perf/builtin-kvm.c 	perf_tool__fill_defaults(&kvm->tool);
kvm              1396 tools/perf/builtin-kvm.c 	kvm->display_time = 1;
kvm              1397 tools/perf/builtin-kvm.c 	kvm->opts.user_interval = 1;
kvm              1398 tools/perf/builtin-kvm.c 	kvm->opts.mmap_pages = 512;
kvm              1399 tools/perf/builtin-kvm.c 	kvm->opts.target.uses_mmap = false;
kvm              1400 tools/perf/builtin-kvm.c 	kvm->opts.target.uid_str = NULL;
kvm              1401 tools/perf/builtin-kvm.c 	kvm->opts.target.uid = UINT_MAX;
kvm              1415 tools/perf/builtin-kvm.c 	kvm->duration *= NSEC_PER_USEC;   /* convert usec to nsec */
kvm              1420 tools/perf/builtin-kvm.c 	err = target__validate(&kvm->opts.target);
kvm              1422 tools/perf/builtin-kvm.c 		target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
kvm              1426 tools/perf/builtin-kvm.c 	if (target__none(&kvm->opts.target))
kvm              1427 tools/perf/builtin-kvm.c 		kvm->opts.target.system_wide = true;
kvm              1433 tools/perf/builtin-kvm.c 	err = setup_kvm_events_tp(kvm);
kvm              1439 tools/perf/builtin-kvm.c 	kvm->evlist = kvm_live_event_list();
kvm              1440 tools/perf/builtin-kvm.c 	if (kvm->evlist == NULL) {
kvm              1445 tools/perf/builtin-kvm.c 	if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
kvm              1451 tools/perf/builtin-kvm.c 	kvm->session = perf_session__new(&data, false, &kvm->tool);
kvm              1452 tools/perf/builtin-kvm.c 	if (IS_ERR(kvm->session)) {
kvm              1453 tools/perf/builtin-kvm.c 		err = PTR_ERR(kvm->session);
kvm              1456 tools/perf/builtin-kvm.c 	kvm->session->evlist = kvm->evlist;
kvm              1457 tools/perf/builtin-kvm.c 	perf_session__set_id_hdr_size(kvm->session);
kvm              1458 tools/perf/builtin-kvm.c 	ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
kvm              1459 tools/perf/builtin-kvm.c 	machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
kvm              1460 tools/perf/builtin-kvm.c 				    kvm->evlist->core.threads, false, 1);
kvm              1461 tools/perf/builtin-kvm.c 	err = kvm_live_open_events(kvm);
kvm              1465 tools/perf/builtin-kvm.c 	err = kvm_events_live_report(kvm);
kvm              1468 tools/perf/builtin-kvm.c 	perf_session__delete(kvm->session);
kvm              1469 tools/perf/builtin-kvm.c 	kvm->session = NULL;
kvm              1470 tools/perf/builtin-kvm.c 	evlist__delete(kvm->evlist);
kvm              1490 tools/perf/builtin-kvm.c 	struct perf_kvm_stat kvm = {
kvm              1505 tools/perf/builtin-kvm.c 		return kvm_events_record(&kvm, argc - 1, argv + 1);
kvm              1508 tools/perf/builtin-kvm.c 		return kvm_events_report(&kvm, argc - 1, argv + 1);
kvm              1512 tools/perf/builtin-kvm.c 		return kvm_events_live(&kvm, argc - 1, argv + 1);

kvm                63 tools/perf/util/kvm-stat.h 	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
kvm               123 tools/perf/util/kvm-stat.h void exit_event_decode_key(struct perf_kvm_stat *kvm,
kvm               129 tools/perf/util/kvm-stat.h int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
kvm               139 tools/perf/util/kvm-stat.h int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
kvm                55 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c 	int i, kvm, kvmvm, kvmcpu;
kvm                60 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c 	kvm = open("/dev/kvm", O_RDWR);
kvm                61 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c 	TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
kvm                62 tools/testing/selftests/kvm/x86_64/mmio_warning_test.c 	kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
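The selftest above exercises the raw ioctl surface directly. A minimal userspace bring-up in the same style (demo_create_vm is a sketch; error paths leak fds for brevity):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_create_vm(void)
    {
    	int kvm = open("/dev/kvm", O_RDWR);

    	if (kvm < 0)
    		return -1;
    	if (ioctl(kvm, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
    		return -1;			/* stable ABI is 12 */

    	int vm = ioctl(kvm, KVM_CREATE_VM, 0);	/* type 0: default machine */
    	if (vm < 0)
    		return -1;
    	return ioctl(vm, KVM_CREATE_VCPU, 0);	/* vcpu id 0 */
    }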
kvm                74 virt/kvm/arm/arch_timer.c static inline bool userspace_irqchip(struct kvm *kvm)
kvm                77 virt/kvm/arm/arch_timer.c 		unlikely(!irqchip_in_kernel(kvm));
kvm               116 virt/kvm/arm/arch_timer.c 	if (userspace_irqchip(vcpu->kvm) &&
kvm               303 virt/kvm/arm/arch_timer.c 	if (!userspace_irqchip(vcpu->kvm)) {
kvm               304 virt/kvm/arm/arch_timer.c 		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
kvm               487 virt/kvm/arm/arch_timer.c 	if (irqchip_in_kernel(vcpu->kvm))
kvm               560 virt/kvm/arm/arch_timer.c 	if (likely(irqchip_in_kernel(vcpu->kvm)))
kvm               634 virt/kvm/arm/arch_timer.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm               658 virt/kvm/arm/arch_timer.c 		if (irqchip_in_kernel(vcpu->kvm)) {
kvm               675 virt/kvm/arm/arch_timer.c 	struct kvm *kvm = vcpu->kvm;
kvm               678 virt/kvm/arm/arch_timer.c 	mutex_lock(&kvm->lock);
kvm               679 virt/kvm/arm/arch_timer.c 	kvm_for_each_vcpu(i, tmp, kvm)
kvm               687 virt/kvm/arm/arch_timer.c 	mutex_unlock(&kvm->lock);
kvm              1015 virt/kvm/arm/arch_timer.c 	kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
kvm              1049 virt/kvm/arm/arch_timer.c 	if (!irqchip_in_kernel(vcpu->kvm))
kvm              1052 virt/kvm/arm/arch_timer.c 	if (!vgic_initialized(vcpu->kvm))
kvm              1106 virt/kvm/arm/arch_timer.c static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
kvm              1111 virt/kvm/arm/arch_timer.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1124 virt/kvm/arm/arch_timer.c 	if (!irqchip_in_kernel(vcpu->kvm))
kvm              1138 virt/kvm/arm/arch_timer.c 		set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
kvm              1141 virt/kvm/arm/arch_timer.c 		set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
kvm               106 virt/kvm/arm/arm.c int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm               110 virt/kvm/arm/arm.c 	ret = kvm_arm_setup_stage2(kvm, type);
kvm               114 virt/kvm/arm/arm.c 	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
kvm               115 virt/kvm/arm/arm.c 	if (!kvm->arch.last_vcpu_ran)
kvm               119 virt/kvm/arm/arm.c 		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
kvm               121 virt/kvm/arm/arm.c 	ret = kvm_alloc_stage2_pgd(kvm);
kvm               125 virt/kvm/arm/arm.c 	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
kvm               129 virt/kvm/arm/arm.c 	kvm_vgic_early_init(kvm);
kvm               132 virt/kvm/arm/arm.c 	kvm->arch.vmid.vmid_gen = 0;
kvm               135 virt/kvm/arm/arm.c 	kvm->arch.max_vcpus = vgic_present ?
kvm               140 virt/kvm/arm/arm.c 	kvm_free_stage2_pgd(kvm);
kvm               142 virt/kvm/arm/arm.c 	free_percpu(kvm->arch.last_vcpu_ran);
kvm               143 virt/kvm/arm/arm.c 	kvm->arch.last_vcpu_ran = NULL;
kvm               162 virt/kvm/arm/arm.c void kvm_arch_destroy_vm(struct kvm *kvm)
kvm               166 virt/kvm/arm/arm.c 	kvm_vgic_destroy(kvm);
kvm               168 virt/kvm/arm/arm.c 	free_percpu(kvm->arch.last_vcpu_ran);
kvm               169 virt/kvm/arm/arm.c 	kvm->arch.last_vcpu_ran = NULL;
kvm               172 virt/kvm/arm/arm.c 		if (kvm->vcpus[i]) {
kvm               173 virt/kvm/arm/arm.c 			kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm               174 virt/kvm/arm/arm.c 			kvm->vcpus[i] = NULL;
kvm               177 virt/kvm/arm/arm.c 	atomic_set(&kvm->online_vcpus, 0);
kvm               180 virt/kvm/arm/arm.c int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
kvm               215 virt/kvm/arm/arm.c 		if (!kvm)
kvm               218 virt/kvm/arm/arm.c 			r = kvm->arch.vgic.msis_require_devid;
kvm               228 virt/kvm/arm/arm.c 		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
kvm               240 virt/kvm/arm/arm.c struct kvm *kvm_arch_alloc_vm(void)
kvm               243 virt/kvm/arm/arm.c 		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
kvm               245 virt/kvm/arm/arm.c 	return vzalloc(sizeof(struct kvm));
kvm               248 virt/kvm/arm/arm.c void kvm_arch_free_vm(struct kvm *kvm)
kvm               251 virt/kvm/arm/arm.c 		kfree(kvm);
kvm               253 virt/kvm/arm/arm.c 		vfree(kvm);
kvm               256 virt/kvm/arm/arm.c struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm               261 virt/kvm/arm/arm.c 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
kvm               266 virt/kvm/arm/arm.c 	if (id >= kvm->arch.max_vcpus) {
kvm               277 virt/kvm/arm/arm.c 	err = kvm_vcpu_init(vcpu, kvm, id);
kvm               300 virt/kvm/arm/arm.c 	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm               372 virt/kvm/arm/arm.c 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
kvm               565 virt/kvm/arm/arm.c 	struct kvm *kvm = vcpu->kvm;
kvm               576 virt/kvm/arm/arm.c 	if (likely(irqchip_in_kernel(kvm))) {
kvm               581 virt/kvm/arm/arm.c 		if (unlikely(!vgic_ready(kvm))) {
kvm               582 virt/kvm/arm/arm.c 			ret = kvm_vgic_map_resources(kvm);
kvm               603 virt/kvm/arm/arm.c bool kvm_arch_intc_initialized(struct kvm *kvm)
kvm               605 virt/kvm/arm/arm.c 	return vgic_initialized(kvm);
kvm               608 virt/kvm/arm/arm.c void kvm_arm_halt_guest(struct kvm *kvm)
kvm               613 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               615 virt/kvm/arm/arm.c 	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
kvm               618 virt/kvm/arm/arm.c void kvm_arm_resume_guest(struct kvm *kvm)
kvm               623 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               714 virt/kvm/arm/arm.c 		update_vmid(&vcpu->kvm->arch.vmid);
kvm               763 virt/kvm/arm/arm.c 		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
kvm               856 virt/kvm/arm/arm.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
kvm               901 virt/kvm/arm/arm.c int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
kvm               906 virt/kvm/arm/arm.c 	int nrcpus = atomic_read(&kvm->online_vcpus);
kvm               919 virt/kvm/arm/arm.c 		if (irqchip_in_kernel(kvm))
kvm               925 virt/kvm/arm/arm.c 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
kvm               934 virt/kvm/arm/arm.c 		if (!irqchip_in_kernel(kvm))
kvm               940 virt/kvm/arm/arm.c 		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
kvm               947 virt/kvm/arm/arm.c 		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
kvm               949 virt/kvm/arm/arm.c 		if (!irqchip_in_kernel(kvm))
kvm               955 virt/kvm/arm/arm.c 		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
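kvm_vm_ioctl_irq_line() above unpacks the irq field into an interrupt type, a vcpu index and an interrupt number before routing to the vgic or a userspace irqchip. The userspace side, as a hedged sketch against the arm uapi encoding (vm_fd comes from KVM_CREATE_VM; SPI numbers start at 32):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int demo_assert_spi(int vm_fd, unsigned int spi)
    {
    	struct kvm_irq_level l = {
    		.irq   = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | spi,
    		.level = 1,	/* assert; write 0 to de-assert */
    	};

    	return ioctl(vm_fd, KVM_IRQ_LINE, &l);
    }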
kvm              1022 virt/kvm/arm/arm.c 		stage2_unmap_vm(vcpu->kvm);
kvm              1245 virt/kvm/arm/arm.c int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
kvm              1250 virt/kvm/arm/arm.c 	mutex_lock(&kvm->slots_lock);
kvm              1252 virt/kvm/arm/arm.c 	r = kvm_get_dirty_log_protect(kvm, log, &flush);
kvm              1255 virt/kvm/arm/arm.c 		kvm_flush_remote_tlbs(kvm);
kvm              1257 virt/kvm/arm/arm.c 	mutex_unlock(&kvm->slots_lock);
kvm              1261 virt/kvm/arm/arm.c int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
kvm              1266 virt/kvm/arm/arm.c 	mutex_lock(&kvm->slots_lock);
kvm              1268 virt/kvm/arm/arm.c 	r = kvm_clear_dirty_log_protect(kvm, log, &flush);
kvm              1271 virt/kvm/arm/arm.c 		kvm_flush_remote_tlbs(kvm);
kvm              1273 virt/kvm/arm/arm.c 	mutex_unlock(&kvm->slots_lock);
kvm              1277 virt/kvm/arm/arm.c static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
kvm              1291 virt/kvm/arm/arm.c 		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
kvm              1300 virt/kvm/arm/arm.c 	struct kvm *kvm = filp->private_data;
kvm              1308 virt/kvm/arm/arm.c 		mutex_lock(&kvm->lock);
kvm              1309 virt/kvm/arm/arm.c 		ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
kvm              1310 virt/kvm/arm/arm.c 		mutex_unlock(&kvm->lock);
kvm              1318 virt/kvm/arm/arm.c 		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
kvm              1629 virt/kvm/arm/arm.c struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
kvm              1635 virt/kvm/arm/arm.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              1653 virt/kvm/arm/arm.c 	return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
kvm              1662 virt/kvm/arm/arm.c 	kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
kvm              1671 virt/kvm/arm/arm.c 	kvm_arm_halt_guest(irqfd->kvm);
kvm              1679 virt/kvm/arm/arm.c 	kvm_arm_resume_guest(irqfd->kvm);
kvm                57 virt/kvm/arm/mmu.c void kvm_flush_remote_tlbs(struct kvm *kvm)
kvm                59 virt/kvm/arm/mmu.c 	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
kvm                62 virt/kvm/arm/mmu.c static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm                64 virt/kvm/arm/mmu.c 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
kvm               100 virt/kvm/arm/mmu.c static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
kvm               106 virt/kvm/arm/mmu.c 	kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               118 virt/kvm/arm/mmu.c static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
kvm               120 virt/kvm/arm/mmu.c 	if (!stage2_pud_huge(kvm, *pudp))
kvm               123 virt/kvm/arm/mmu.c 	stage2_pud_clear(kvm, pudp);
kvm               124 virt/kvm/arm/mmu.c 	kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               160 virt/kvm/arm/mmu.c static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
kvm               162 virt/kvm/arm/mmu.c 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
kvm               163 virt/kvm/arm/mmu.c 	stage2_pgd_clear(kvm, pgd);
kvm               164 virt/kvm/arm/mmu.c 	kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               165 virt/kvm/arm/mmu.c 	stage2_pud_free(kvm, pud_table);
kvm               169 virt/kvm/arm/mmu.c static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
kvm               171 virt/kvm/arm/mmu.c 	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
kvm               172 virt/kvm/arm/mmu.c 	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
kvm               173 virt/kvm/arm/mmu.c 	stage2_pud_clear(kvm, pud);
kvm               174 virt/kvm/arm/mmu.c 	kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               175 virt/kvm/arm/mmu.c 	stage2_pmd_free(kvm, pmd_table);
kvm               179 virt/kvm/arm/mmu.c static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
kvm               184 virt/kvm/arm/mmu.c 	kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               242 virt/kvm/arm/mmu.c static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
kvm               254 virt/kvm/arm/mmu.c 			kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               264 virt/kvm/arm/mmu.c 	if (stage2_pte_table_empty(kvm, start_pte))
kvm               265 virt/kvm/arm/mmu.c 		clear_stage2_pmd_entry(kvm, pmd, start_addr);
kvm               268 virt/kvm/arm/mmu.c static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
kvm               274 virt/kvm/arm/mmu.c 	start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
kvm               276 virt/kvm/arm/mmu.c 		next = stage2_pmd_addr_end(kvm, addr, end);
kvm               282 virt/kvm/arm/mmu.c 				kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               288 virt/kvm/arm/mmu.c 				unmap_stage2_ptes(kvm, pmd, addr, next);
kvm               293 virt/kvm/arm/mmu.c 	if (stage2_pmd_table_empty(kvm, start_pmd))
kvm               294 virt/kvm/arm/mmu.c 		clear_stage2_pud_entry(kvm, pud, start_addr);
kvm               297 virt/kvm/arm/mmu.c static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
kvm               303 virt/kvm/arm/mmu.c 	start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
kvm               305 virt/kvm/arm/mmu.c 		next = stage2_pud_addr_end(kvm, addr, end);
kvm               306 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
kvm               307 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud)) {
kvm               310 virt/kvm/arm/mmu.c 				stage2_pud_clear(kvm, pud);
kvm               311 virt/kvm/arm/mmu.c 				kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm               315 virt/kvm/arm/mmu.c 				unmap_stage2_pmds(kvm, pud, addr, next);
kvm               320 virt/kvm/arm/mmu.c 	if (stage2_pud_table_empty(kvm, start_pud))
kvm               321 virt/kvm/arm/mmu.c 		clear_stage2_pgd_entry(kvm, pgd, start_addr);
kvm               335 virt/kvm/arm/mmu.c static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
kvm               341 virt/kvm/arm/mmu.c 	assert_spin_locked(&kvm->mmu_lock);
kvm               344 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
kvm               351 virt/kvm/arm/mmu.c 		if (!READ_ONCE(kvm->arch.pgd))
kvm               353 virt/kvm/arm/mmu.c 		next = stage2_pgd_addr_end(kvm, addr, end);
kvm               354 virt/kvm/arm/mmu.c 		if (!stage2_pgd_none(kvm, *pgd))
kvm               355 virt/kvm/arm/mmu.c 			unmap_stage2_puds(kvm, pgd, addr, next);
kvm               361 virt/kvm/arm/mmu.c 			cond_resched_lock(&kvm->mmu_lock);
kvm               365 virt/kvm/arm/mmu.c static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
kvm               377 virt/kvm/arm/mmu.c static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
kvm               383 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
kvm               385 virt/kvm/arm/mmu.c 		next = stage2_pmd_addr_end(kvm, addr, end);
kvm               390 virt/kvm/arm/mmu.c 				stage2_flush_ptes(kvm, pmd, addr, next);
kvm               395 virt/kvm/arm/mmu.c static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
kvm               401 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
kvm               403 virt/kvm/arm/mmu.c 		next = stage2_pud_addr_end(kvm, addr, end);
kvm               404 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
kvm               405 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud))
kvm               408 virt/kvm/arm/mmu.c 				stage2_flush_pmds(kvm, pud, addr, next);
kvm               413 virt/kvm/arm/mmu.c static void stage2_flush_memslot(struct kvm *kvm,
kvm               421 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
kvm               423 virt/kvm/arm/mmu.c 		next = stage2_pgd_addr_end(kvm, addr, end);
kvm               424 virt/kvm/arm/mmu.c 		if (!stage2_pgd_none(kvm, *pgd))
kvm               425 virt/kvm/arm/mmu.c 			stage2_flush_puds(kvm, pgd, addr, next);
kvm               436 virt/kvm/arm/mmu.c static void stage2_flush_vm(struct kvm *kvm)
kvm               442 virt/kvm/arm/mmu.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               443 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm               445 virt/kvm/arm/mmu.c 	slots = kvm_memslots(kvm);
kvm               447 virt/kvm/arm/mmu.c 		stage2_flush_memslot(kvm, memslot);
kvm               449 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm               450 virt/kvm/arm/mmu.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               899 virt/kvm/arm/mmu.c int kvm_alloc_stage2_pgd(struct kvm *kvm)
kvm               904 virt/kvm/arm/mmu.c 	if (kvm->arch.pgd != NULL) {
kvm               910 virt/kvm/arm/mmu.c 	pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
kvm               915 virt/kvm/arm/mmu.c 	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
kvm               918 virt/kvm/arm/mmu.c 	kvm->arch.pgd = pgd;
kvm               919 virt/kvm/arm/mmu.c 	kvm->arch.pgd_phys = pgd_phys;
kvm               923 virt/kvm/arm/mmu.c static void stage2_unmap_memslot(struct kvm *kvm,
kvm               958 virt/kvm/arm/mmu.c 			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
kvm               971 virt/kvm/arm/mmu.c void stage2_unmap_vm(struct kvm *kvm)
kvm               977 virt/kvm/arm/mmu.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               979 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm               981 virt/kvm/arm/mmu.c 	slots = kvm_memslots(kvm);
kvm               983 virt/kvm/arm/mmu.c 		stage2_unmap_memslot(kvm, memslot);
kvm               985 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm               987 virt/kvm/arm/mmu.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               998 virt/kvm/arm/mmu.c void kvm_free_stage2_pgd(struct kvm *kvm)
kvm              1002 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              1003 virt/kvm/arm/mmu.c 	if (kvm->arch.pgd) {
kvm              1004 virt/kvm/arm/mmu.c 		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
kvm              1005 virt/kvm/arm/mmu.c 		pgd = READ_ONCE(kvm->arch.pgd);
kvm              1006 virt/kvm/arm/mmu.c 		kvm->arch.pgd = NULL;
kvm              1007 virt/kvm/arm/mmu.c 		kvm->arch.pgd_phys = 0;
kvm              1009 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              1013 virt/kvm/arm/mmu.c 		free_pages_exact(pgd, stage2_pgd_size(kvm));
kvm              1016 virt/kvm/arm/mmu.c static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
kvm              1022 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
kvm              1023 virt/kvm/arm/mmu.c 	if (stage2_pgd_none(kvm, *pgd)) {
kvm              1027 virt/kvm/arm/mmu.c 		stage2_pgd_populate(kvm, pgd, pud);
kvm              1031 virt/kvm/arm/mmu.c 	return stage2_pud_offset(kvm, pgd, addr);
kvm              1034 virt/kvm/arm/mmu.c static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
kvm              1040 virt/kvm/arm/mmu.c 	pud = stage2_get_pud(kvm, cache, addr);
kvm              1041 virt/kvm/arm/mmu.c 	if (!pud || stage2_pud_huge(kvm, *pud))
kvm              1044 virt/kvm/arm/mmu.c 	if (stage2_pud_none(kvm, *pud)) {
kvm              1048 virt/kvm/arm/mmu.c 		stage2_pud_populate(kvm, pud, pmd);
kvm              1052 virt/kvm/arm/mmu.c 	return stage2_pmd_offset(kvm, pud, addr);
kvm              1055 virt/kvm/arm/mmu.c static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
kvm              1061 virt/kvm/arm/mmu.c 	pmd = stage2_get_pmd(kvm, cache, addr);
kvm              1094 virt/kvm/arm/mmu.c 			unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
kvm              1110 virt/kvm/arm/mmu.c 		kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm              1119 virt/kvm/arm/mmu.c static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
kvm              1125 virt/kvm/arm/mmu.c 	pudp = stage2_get_pud(kvm, cache, addr);
kvm              1138 virt/kvm/arm/mmu.c 	if (stage2_pud_present(kvm, old_pud)) {
kvm              1143 virt/kvm/arm/mmu.c 		if (!stage2_pud_huge(kvm, old_pud)) {
kvm              1144 virt/kvm/arm/mmu.c 			unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
kvm              1149 virt/kvm/arm/mmu.c 		stage2_pud_clear(kvm, pudp);
kvm              1150 virt/kvm/arm/mmu.c 		kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm              1165 virt/kvm/arm/mmu.c static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
kvm              1176 virt/kvm/arm/mmu.c 	pudp = stage2_get_pud(kvm, NULL, addr);
kvm              1177 virt/kvm/arm/mmu.c 	if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
kvm              1180 virt/kvm/arm/mmu.c 	if (stage2_pud_huge(kvm, *pudp)) {
kvm              1185 virt/kvm/arm/mmu.c 	pmdp = stage2_pmd_offset(kvm, pudp, addr);
kvm              1202 virt/kvm/arm/mmu.c static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
kvm              1209 virt/kvm/arm/mmu.c 	found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
kvm              1221 virt/kvm/arm/mmu.c static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
kvm              1234 virt/kvm/arm/mmu.c 	pud = stage2_get_pud(kvm, cache, addr);
kvm              1248 virt/kvm/arm/mmu.c 		stage2_dissolve_pud(kvm, addr, pud);
kvm              1250 virt/kvm/arm/mmu.c 	if (stage2_pud_none(kvm, *pud)) {
kvm              1254 virt/kvm/arm/mmu.c 		stage2_pud_populate(kvm, pud, pmd);
kvm              1258 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
kvm              1272 virt/kvm/arm/mmu.c 		stage2_dissolve_pmd(kvm, addr, pmd);
kvm              1296 virt/kvm/arm/mmu.c 		kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm              1339 virt/kvm/arm/mmu.c int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
kvm              1357 virt/kvm/arm/mmu.c 					     kvm_mmu_cache_min_pages(kvm),
kvm              1361 virt/kvm/arm/mmu.c 		spin_lock(&kvm->mmu_lock);
kvm              1362 virt/kvm/arm/mmu.c 		ret = stage2_set_pte(kvm, &cache, addr, &pte,
kvm              1364 virt/kvm/arm/mmu.c 		spin_unlock(&kvm->mmu_lock);
kvm              1449 virt/kvm/arm/mmu.c static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
kvm              1455 virt/kvm/arm/mmu.c 	pmd = stage2_pmd_offset(kvm, pud, addr);
kvm              1458 virt/kvm/arm/mmu.c 		next = stage2_pmd_addr_end(kvm, addr, end);
kvm              1476 virt/kvm/arm/mmu.c static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
kvm              1482 virt/kvm/arm/mmu.c 	pud = stage2_pud_offset(kvm, pgd, addr);
kvm              1484 virt/kvm/arm/mmu.c 		next = stage2_pud_addr_end(kvm, addr, end);
kvm              1485 virt/kvm/arm/mmu.c 		if (!stage2_pud_none(kvm, *pud)) {
kvm              1486 virt/kvm/arm/mmu.c 			if (stage2_pud_huge(kvm, *pud)) {
kvm              1490 virt/kvm/arm/mmu.c 				stage2_wp_pmds(kvm, pud, addr, next);
kvm              1502 virt/kvm/arm/mmu.c static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
kvm              1507 virt/kvm/arm/mmu.c 	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
kvm              1518 virt/kvm/arm/mmu.c 		cond_resched_lock(&kvm->mmu_lock);
kvm              1519 virt/kvm/arm/mmu.c 		if (!READ_ONCE(kvm->arch.pgd))
kvm              1521 virt/kvm/arm/mmu.c 		next = stage2_pgd_addr_end(kvm, addr, end);
kvm              1522 virt/kvm/arm/mmu.c 		if (stage2_pgd_present(kvm, *pgd))
kvm              1523 virt/kvm/arm/mmu.c 			stage2_wp_puds(kvm, pgd, addr, next);
kvm              1540 virt/kvm/arm/mmu.c void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
kvm              1542 virt/kvm/arm/mmu.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              1547 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              1548 virt/kvm/arm/mmu.c 	stage2_wp_range(kvm, start, end);
kvm              1549 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              1550 virt/kvm/arm/mmu.c 	kvm_flush_remote_tlbs(kvm);
kvm              1564 virt/kvm/arm/mmu.c static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
kvm              1572 virt/kvm/arm/mmu.c 	stage2_wp_range(kvm, start, end);
kvm              1582 virt/kvm/arm/mmu.c void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm              1586 virt/kvm/arm/mmu.c 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
kvm              1678 virt/kvm/arm/mmu.c 	struct kvm *kvm = vcpu->kvm;
kvm              1720 virt/kvm/arm/mmu.c 	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
kvm              1725 virt/kvm/arm/mmu.c 	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
kvm              1730 virt/kvm/arm/mmu.c 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
kvm              1742 virt/kvm/arm/mmu.c 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
kvm              1772 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              1773 virt/kvm/arm/mmu.c 	if (mmu_notifier_retry(kvm, mmu_seq))
kvm              1808 virt/kvm/arm/mmu.c 		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
kvm              1820 virt/kvm/arm/mmu.c 		ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
kvm              1832 virt/kvm/arm/mmu.c 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
kvm              1838 virt/kvm/arm/mmu.c 			mark_page_dirty(kvm, gfn);
kvm              1844 virt/kvm/arm/mmu.c 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
kvm              1848 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              1871 virt/kvm/arm/mmu.c 	spin_lock(&vcpu->kvm->mmu_lock);
kvm              1873 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
kvm              1891 virt/kvm/arm/mmu.c 	spin_unlock(&vcpu->kvm->mmu_lock);
kvm              1951 virt/kvm/arm/mmu.c 	idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              1954 virt/kvm/arm/mmu.c 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
kvm              1992 virt/kvm/arm/mmu.c 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
kvm              2009 virt/kvm/arm/mmu.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              2013 virt/kvm/arm/mmu.c static int handle_hva_to_gpa(struct kvm *kvm,
kvm              2016 virt/kvm/arm/mmu.c 			     int (*handler)(struct kvm *kvm,
kvm              2025 virt/kvm/arm/mmu.c 	slots = kvm_memslots(kvm);
kvm              2039 virt/kvm/arm/mmu.c 		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
kvm              2045 virt/kvm/arm/mmu.c static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
kvm              2047 virt/kvm/arm/mmu.c 	unmap_stage2_range(kvm, gpa, size);
kvm              2051 virt/kvm/arm/mmu.c int kvm_unmap_hva_range(struct kvm *kvm,
kvm              2054 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
kvm              2058 virt/kvm/arm/mmu.c 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
kvm              2062 virt/kvm/arm/mmu.c static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
kvm              2074 virt/kvm/arm/mmu.c 	stage2_set_pte(kvm, NULL, gpa, pte, 0);
kvm              2079 virt/kvm/arm/mmu.c int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm              2085 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
kvm              2096 virt/kvm/arm/mmu.c 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
kvm              2101 virt/kvm/arm/mmu.c static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
kvm              2108 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
kvm              2119 virt/kvm/arm/mmu.c static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
kvm              2126 virt/kvm/arm/mmu.c 	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
kvm              2137 virt/kvm/arm/mmu.c int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
kvm              2139 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
kvm              2142 virt/kvm/arm/mmu.c 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
kvm              2145 virt/kvm/arm/mmu.c int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
kvm              2147 virt/kvm/arm/mmu.c 	if (!kvm->arch.pgd)
kvm              2150 virt/kvm/arm/mmu.c 	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
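All four MMU-notifier entry points above (unmap, set_spte, age, test_age) funnel through handle_hva_to_gpa(), which clips each memslot against the hva range and invokes the handler once per resulting gpa range. The handler shape, with a hypothetical callback that just counts pages:

    static int demo_count_handler(struct kvm *kvm, gpa_t gpa, u64 size,
    			          void *data)
    {
    	*(u64 *)data += size >> PAGE_SHIFT;	/* pages in this slot chunk */
    	return 0;
    }

    /* usage, mirroring kvm_unmap_hva_range() above:
     *	u64 pages = 0;
     *	handle_hva_to_gpa(kvm, start, end, &demo_count_handler, &pages);
     */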
kvm              2261 virt/kvm/arm/mmu.c void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm              2273 virt/kvm/arm/mmu.c 		kvm_mmu_wp_memory_region(kvm, mem->slot);
kvm              2276 virt/kvm/arm/mmu.c int kvm_arch_prepare_memory_region(struct kvm *kvm,
kvm              2295 virt/kvm/arm/mmu.c 	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
kvm              2347 virt/kvm/arm/mmu.c 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
kvm              2359 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              2361 virt/kvm/arm/mmu.c 		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
kvm              2363 virt/kvm/arm/mmu.c 		stage2_flush_memslot(kvm, memslot);
kvm              2364 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              2370 virt/kvm/arm/mmu.c void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm              2375 virt/kvm/arm/mmu.c int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm              2381 virt/kvm/arm/mmu.c void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
kvm              2385 virt/kvm/arm/mmu.c void kvm_arch_flush_shadow_all(struct kvm *kvm)
kvm              2387 virt/kvm/arm/mmu.c 	kvm_free_stage2_pgd(kvm);
kvm              2390 virt/kvm/arm/mmu.c void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm              2396 virt/kvm/arm/mmu.c 	spin_lock(&kvm->mmu_lock);
kvm              2397 virt/kvm/arm/mmu.c 	unmap_stage2_range(kvm, gpa, size);
kvm              2398 virt/kvm/arm/mmu.c 	spin_unlock(&kvm->mmu_lock);
kvm              2445 virt/kvm/arm/mmu.c 		stage2_flush_vm(vcpu->kvm);
kvm              2460 virt/kvm/arm/mmu.c 		stage2_flush_vm(vcpu->kvm);
kvm               382 virt/kvm/arm/pmu.c 	if (likely(irqchip_in_kernel(vcpu->kvm))) {
kvm               383 virt/kvm/arm/pmu.c 		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
kvm               395 virt/kvm/arm/pmu.c 	if (likely(irqchip_in_kernel(vcpu->kvm)))
kvm               709 virt/kvm/arm/pmu.c 	if (irqchip_in_kernel(vcpu->kvm)) {
kvm               720 virt/kvm/arm/pmu.c 		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
kvm               743 virt/kvm/arm/pmu.c 	if (irqchip_in_kernel(vcpu->kvm)) {
kvm               751 virt/kvm/arm/pmu.c 		if (!vgic_initialized(vcpu->kvm))
kvm               772 virt/kvm/arm/pmu.c static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
kvm               777 virt/kvm/arm/pmu.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               800 virt/kvm/arm/pmu.c 		if (!irqchip_in_kernel(vcpu->kvm))
kvm               813 virt/kvm/arm/pmu.c 		if (!pmu_irq_is_valid(vcpu->kvm, irq))
kvm               837 virt/kvm/arm/pmu.c 		if (!irqchip_in_kernel(vcpu->kvm))
kvm                97 virt/kvm/arm/psci.c 	struct kvm *kvm = source_vcpu->kvm;
kvm               105 virt/kvm/arm/psci.c 	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
kvm               114 virt/kvm/arm/psci.c 		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
kvm               155 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
kvm               173 virt/kvm/arm/psci.c 	kvm_for_each_vcpu(i, tmp, kvm) {
kvm               202 virt/kvm/arm/psci.c 	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
kvm               204 virt/kvm/arm/psci.c 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
kvm               223 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
kvm               246 virt/kvm/arm/psci.c 		mutex_lock(&kvm->lock);
kvm               248 virt/kvm/arm/psci.c 		mutex_unlock(&kvm->lock);
kvm               339 virt/kvm/arm/psci.c 	struct kvm *kvm = vcpu->kvm;
kvm               349 virt/kvm/arm/psci.c 		mutex_lock(&kvm->lock);
kvm               351 virt/kvm/arm/psci.c 		mutex_unlock(&kvm->lock);
kvm               378 virt/kvm/arm/psci.c 	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
kvm               503 virt/kvm/arm/psci.c 		val = kvm_psci_version(vcpu, vcpu->kvm);
kvm               545 virt/kvm/arm/psci.c 			vcpu->kvm->arch.psci_version = val;
kvm               551 virt/kvm/arm/psci.c 			vcpu->kvm->arch.psci_version = val;
kvm                 9 virt/kvm/arm/trace.h #define TRACE_SYSTEM kvm
kvm               373 virt/kvm/arm/trace.h #define TRACE_INCLUDE_PATH ../../virt/kvm/arm
kvm                 8 virt/kvm/arm/vgic/trace.h #define TRACE_SYSTEM kvm
kvm                33 virt/kvm/arm/vgic/trace.h #define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
kvm                55 virt/kvm/arm/vgic/vgic-debug.c static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
kvm                58 virt/kvm/arm/vgic/vgic-debug.c 	int nr_cpus = atomic_read(&kvm->online_vcpus);
kvm                63 virt/kvm/arm/vgic/vgic-debug.c 	iter->nr_spis = kvm->arch.vgic.nr_spis;
kvm                64 virt/kvm/arm/vgic/vgic-debug.c 	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
kvm                65 virt/kvm/arm/vgic/vgic-debug.c 		iter->nr_lpis = vgic_copy_lpi_list(kvm, NULL, &iter->lpi_array);
kvm                85 virt/kvm/arm/vgic/vgic-debug.c 	struct kvm *kvm = (struct kvm *)s->private;
kvm                88 virt/kvm/arm/vgic/vgic-debug.c 	mutex_lock(&kvm->lock);
kvm                89 virt/kvm/arm/vgic/vgic-debug.c 	iter = kvm->arch.vgic.iter;
kvm               101 virt/kvm/arm/vgic/vgic-debug.c 	iter_init(kvm, iter, *pos);
kvm               102 virt/kvm/arm/vgic/vgic-debug.c 	kvm->arch.vgic.iter = iter;
kvm               107 virt/kvm/arm/vgic/vgic-debug.c 	mutex_unlock(&kvm->lock);
kvm               113 virt/kvm/arm/vgic/vgic-debug.c 	struct kvm *kvm = (struct kvm *)s->private;
kvm               114 virt/kvm/arm/vgic/vgic-debug.c 	struct vgic_state_iter *iter = kvm->arch.vgic.iter;
kvm               125 virt/kvm/arm/vgic/vgic-debug.c 	struct kvm *kvm = (struct kvm *)s->private;
kvm               135 virt/kvm/arm/vgic/vgic-debug.c 	mutex_lock(&kvm->lock);
kvm               136 virt/kvm/arm/vgic/vgic-debug.c 	iter = kvm->arch.vgic.iter;
kvm               139 virt/kvm/arm/vgic/vgic-debug.c 	kvm->arch.vgic.iter = NULL;
kvm               140 virt/kvm/arm/vgic/vgic-debug.c 	mutex_unlock(&kvm->lock);
kvm               220 virt/kvm/arm/vgic/vgic-debug.c 	struct kvm *kvm = (struct kvm *)s->private;
kvm               227 virt/kvm/arm/vgic/vgic-debug.c 		print_dist_state(s, &kvm->arch.vgic);
kvm               231 virt/kvm/arm/vgic/vgic-debug.c 	if (!kvm->arch.vgic.initialized)
kvm               235 virt/kvm/arm/vgic/vgic-debug.c 		vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);
kvm               237 virt/kvm/arm/vgic/vgic-debug.c 	irq = vgic_get_irq(kvm, vcpu, iter->intid);
kvm               247 virt/kvm/arm/vgic/vgic-debug.c 	vgic_put_irq(kvm, irq);
kvm               280 virt/kvm/arm/vgic/vgic-debug.c void vgic_debug_init(struct kvm *kvm)
kvm               282 virt/kvm/arm/vgic/vgic-debug.c 	debugfs_create_file("vgic-state", 0444, kvm->debugfs_dentry, kvm,
kvm               286 virt/kvm/arm/vgic/vgic-debug.c void vgic_debug_destroy(struct kvm *kvm)
kvm                52 virt/kvm/arm/vgic/vgic-init.c void kvm_vgic_early_init(struct kvm *kvm)
kvm                54 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm                71 virt/kvm/arm/vgic/vgic-init.c int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm                76 virt/kvm/arm/vgic/vgic-init.c 	if (irqchip_in_kernel(kvm))
kvm                95 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               101 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               108 virt/kvm/arm/vgic/vgic-init.c 		kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
kvm               110 virt/kvm/arm/vgic/vgic-init.c 		kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;
kvm               112 virt/kvm/arm/vgic/vgic-init.c 	if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
kvm               117 virt/kvm/arm/vgic/vgic-init.c 	kvm->arch.vgic.in_kernel = true;
kvm               118 virt/kvm/arm/vgic/vgic-init.c 	kvm->arch.vgic.vgic_model = type;
kvm               120 virt/kvm/arm/vgic/vgic-init.c 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
kvm               123 virt/kvm/arm/vgic/vgic-init.c 		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
kvm               125 virt/kvm/arm/vgic/vgic-init.c 		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
kvm               129 virt/kvm/arm/vgic/vgic-init.c 		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
kvm               142 virt/kvm/arm/vgic/vgic-init.c static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
kvm               144 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               145 virt/kvm/arm/vgic/vgic-init.c 	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
kvm               198 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
kvm               230 virt/kvm/arm/vgic/vgic-init.c 	if (!irqchip_in_kernel(vcpu->kvm))
kvm               238 virt/kvm/arm/vgic/vgic-init.c 		mutex_lock(&vcpu->kvm->lock);
kvm               240 virt/kvm/arm/vgic/vgic-init.c 		mutex_unlock(&vcpu->kvm->lock);
kvm               263 virt/kvm/arm/vgic/vgic-init.c int vgic_init(struct kvm *kvm)
kvm               265 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               269 virt/kvm/arm/vgic/vgic-init.c 	if (vgic_initialized(kvm))
kvm               273 virt/kvm/arm/vgic/vgic-init.c 	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
kvm               280 virt/kvm/arm/vgic/vgic-init.c 	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
kvm               285 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(idx, vcpu, kvm) {
kvm               306 virt/kvm/arm/vgic/vgic-init.c 	if (vgic_has_its(kvm)) {
kvm               307 virt/kvm/arm/vgic/vgic-init.c 		vgic_lpi_translation_cache_init(kvm);
kvm               308 virt/kvm/arm/vgic/vgic-init.c 		ret = vgic_v4_init(kvm);
kvm               313 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               316 virt/kvm/arm/vgic/vgic-init.c 	ret = kvm_vgic_setup_default_irq_routing(kvm);
kvm               320 virt/kvm/arm/vgic/vgic-init.c 	vgic_debug_init(kvm);
kvm               329 virt/kvm/arm/vgic/vgic-init.c static void kvm_vgic_dist_destroy(struct kvm *kvm)
kvm               331 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               341 virt/kvm/arm/vgic/vgic-init.c 	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
kvm               349 virt/kvm/arm/vgic/vgic-init.c 	if (vgic_has_its(kvm))
kvm               350 virt/kvm/arm/vgic/vgic-init.c 		vgic_lpi_translation_cache_destroy(kvm);
kvm               352 virt/kvm/arm/vgic/vgic-init.c 	if (vgic_supports_direct_msis(kvm))
kvm               353 virt/kvm/arm/vgic/vgic-init.c 		vgic_v4_teardown(kvm);
kvm               364 virt/kvm/arm/vgic/vgic-init.c static void __kvm_vgic_destroy(struct kvm *kvm)
kvm               369 virt/kvm/arm/vgic/vgic-init.c 	vgic_debug_destroy(kvm);
kvm               371 virt/kvm/arm/vgic/vgic-init.c 	kvm_vgic_dist_destroy(kvm);
kvm               373 virt/kvm/arm/vgic/vgic-init.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               377 virt/kvm/arm/vgic/vgic-init.c void kvm_vgic_destroy(struct kvm *kvm)
kvm               379 virt/kvm/arm/vgic/vgic-init.c 	mutex_lock(&kvm->lock);
kvm               380 virt/kvm/arm/vgic/vgic-init.c 	__kvm_vgic_destroy(kvm);
kvm               381 virt/kvm/arm/vgic/vgic-init.c 	mutex_unlock(&kvm->lock);
kvm               390 virt/kvm/arm/vgic/vgic-init.c int vgic_lazy_init(struct kvm *kvm)
kvm               394 virt/kvm/arm/vgic/vgic-init.c 	if (unlikely(!vgic_initialized(kvm))) {
kvm               401 virt/kvm/arm/vgic/vgic-init.c 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
kvm               404 virt/kvm/arm/vgic/vgic-init.c 		mutex_lock(&kvm->lock);
kvm               405 virt/kvm/arm/vgic/vgic-init.c 		ret = vgic_init(kvm);
kvm               406 virt/kvm/arm/vgic/vgic-init.c 		mutex_unlock(&kvm->lock);
kvm               422 virt/kvm/arm/vgic/vgic-init.c int kvm_vgic_map_resources(struct kvm *kvm)
kvm               424 virt/kvm/arm/vgic/vgic-init.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               427 virt/kvm/arm/vgic/vgic-init.c 	mutex_lock(&kvm->lock);
kvm               428 virt/kvm/arm/vgic/vgic-init.c 	if (!irqchip_in_kernel(kvm))
kvm               432 virt/kvm/arm/vgic/vgic-init.c 		ret = vgic_v2_map_resources(kvm);
kvm               434 virt/kvm/arm/vgic/vgic-init.c 		ret = vgic_v3_map_resources(kvm);
kvm               437 virt/kvm/arm/vgic/vgic-init.c 		__kvm_vgic_destroy(kvm);
kvm               440 virt/kvm/arm/vgic/vgic-init.c 	mutex_unlock(&kvm->lock);
kvm                19 virt/kvm/arm/vgic/vgic-irqfd.c 			struct kvm *kvm, int irq_source_id,
kvm                24 virt/kvm/arm/vgic/vgic-irqfd.c 	if (!vgic_valid_spi(kvm, spi_id))
kvm                26 virt/kvm/arm/vgic/vgic-irqfd.c 	return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);
kvm                38 virt/kvm/arm/vgic/vgic-irqfd.c int kvm_set_routing_entry(struct kvm *kvm,
kvm                86 virt/kvm/arm/vgic/vgic-irqfd.c 		struct kvm *kvm, int irq_source_id,
kvm                91 virt/kvm/arm/vgic/vgic-irqfd.c 	if (!vgic_has_its(kvm))
kvm                98 virt/kvm/arm/vgic/vgic-irqfd.c 	return vgic_its_inject_msi(kvm, &msi);
kvm               107 virt/kvm/arm/vgic/vgic-irqfd.c 			      struct kvm *kvm, int irq_source_id, int level,
kvm               110 virt/kvm/arm/vgic/vgic-irqfd.c 	if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {
kvm               114 virt/kvm/arm/vgic/vgic-irqfd.c 		if (!vgic_its_inject_cached_translation(kvm, &msi))
kvm               121 virt/kvm/arm/vgic/vgic-irqfd.c int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
kvm               124 virt/kvm/arm/vgic/vgic-irqfd.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               138 virt/kvm/arm/vgic/vgic-irqfd.c 	ret = kvm_set_irq_routing(kvm, entries, nr, 0);
kvm                29 virt/kvm/arm/vgic/vgic-its.c static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
kvm                39 virt/kvm/arm/vgic/vgic-its.c static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
kvm                42 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm                43 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
kvm               100 virt/kvm/arm/vgic/vgic-its.c 	ret = update_lpi_config(kvm, irq, NULL, false);
kvm               104 virt/kvm/arm/vgic/vgic-its.c 	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
kvm               273 virt/kvm/arm/vgic/vgic-its.c static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
kvm               276 virt/kvm/arm/vgic/vgic-its.c 	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
kvm               281 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
kvm               294 virt/kvm/arm/vgic/vgic-its.c 			vgic_queue_irq_unlock(kvm, irq, flags);
kvm               312 virt/kvm/arm/vgic/vgic-its.c int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
kvm               314 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               377 virt/kvm/arm/vgic/vgic-its.c static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
kvm               384 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
kvm               392 virt/kvm/arm/vgic/vgic-its.c static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
kvm               402 virt/kvm/arm/vgic/vgic-its.c 		update_affinity_ite(kvm, ite);
kvm               429 virt/kvm/arm/vgic/vgic-its.c 	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
kvm               444 virt/kvm/arm/vgic/vgic-its.c 			ret = kvm_read_guest_lock(vcpu->kvm,
kvm               454 virt/kvm/arm/vgic/vgic-its.c 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
kvm               457 virt/kvm/arm/vgic/vgic-its.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               458 virt/kvm/arm/vgic/vgic-its.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               466 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
kvm               488 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
kvm               499 virt/kvm/arm/vgic/vgic-its.c static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
kvm               511 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
kvm               569 virt/kvm/arm/vgic/vgic-its.c static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
kvm               572 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               583 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
kvm               587 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               620 virt/kvm/arm/vgic/vgic-its.c 		__vgic_put_lpi_locked(kvm, cte->irq);
kvm               636 virt/kvm/arm/vgic/vgic-its.c void vgic_its_invalidate_cache(struct kvm *kvm)
kvm               638 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               652 virt/kvm/arm/vgic/vgic-its.c 		__vgic_put_lpi_locked(kvm, cte->irq);
kvm               659 virt/kvm/arm/vgic/vgic-its.c int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
kvm               672 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
kvm               679 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
kvm               685 virt/kvm/arm/vgic/vgic-its.c struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
kvm               691 virt/kvm/arm/vgic/vgic-its.c 	if (!vgic_has_its(kvm))
kvm               699 virt/kvm/arm/vgic/vgic-its.c 	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
kvm               720 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
kvm               727 virt/kvm/arm/vgic/vgic-its.c 	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
kvm               737 virt/kvm/arm/vgic/vgic-its.c 	vgic_queue_irq_unlock(kvm, irq, flags);
kvm               742 virt/kvm/arm/vgic/vgic-its.c int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
kvm               749 virt/kvm/arm/vgic/vgic-its.c 	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
kvm               756 virt/kvm/arm/vgic/vgic-its.c 	vgic_queue_irq_unlock(kvm, irq, flags);
kvm               767 virt/kvm/arm/vgic/vgic-its.c int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
kvm               772 virt/kvm/arm/vgic/vgic-its.c 	if (!vgic_its_inject_cached_translation(kvm, msi))
kvm               775 virt/kvm/arm/vgic/vgic-its.c 	its = vgic_msi_to_its(kvm, msi);
kvm               780 virt/kvm/arm/vgic/vgic-its.c 	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
kvm               798 virt/kvm/arm/vgic/vgic-its.c static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
kvm               807 virt/kvm/arm/vgic/vgic-its.c 		vgic_put_irq(kvm, ite->irq);
kvm               832 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
kvm               847 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_invalidate_cache(kvm);
kvm               849 virt/kvm/arm/vgic/vgic-its.c 		its_free_ite(kvm, ite);
kvm               860 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
kvm               882 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
kvm               884 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_invalidate_cache(kvm);
kvm               942 virt/kvm/arm/vgic/vgic-its.c 	if (kvm_read_guest_lock(its->dev->kvm,
kvm               965 virt/kvm/arm/vgic/vgic-its.c 	idx = srcu_read_lock(&its->dev->kvm->srcu);
kvm               966 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
kvm               967 virt/kvm/arm/vgic/vgic-its.c 	srcu_read_unlock(&its->dev->kvm->srcu, idx);
kvm              1039 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
kvm              1064 virt/kvm/arm/vgic/vgic-its.c 	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
kvm              1087 virt/kvm/arm/vgic/vgic-its.c 		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
kvm              1089 virt/kvm/arm/vgic/vgic-its.c 	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
kvm              1093 virt/kvm/arm/vgic/vgic-its.c 		its_free_ite(kvm, ite);
kvm              1102 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
kvm              1112 virt/kvm/arm/vgic/vgic-its.c 		its_free_ite(kvm, ite);
kvm              1114 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_invalidate_cache(kvm);
kvm              1121 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
kvm              1126 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_free_device(kvm, cur);
kvm              1130 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
kvm              1162 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
kvm              1185 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_free_device(kvm, device);
kvm              1204 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
kvm              1216 virt/kvm/arm/vgic/vgic-its.c 	if (target_addr >= atomic_read(&kvm->online_vcpus))
kvm              1221 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_invalidate_cache(kvm);
kvm              1235 virt/kvm/arm/vgic/vgic-its.c 			update_affinity_collection(kvm, its, collection);
kvm              1246 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
kvm              1271 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
kvm              1283 virt/kvm/arm/vgic/vgic-its.c 	return update_lpi_config(kvm, ite->irq, NULL, true);
kvm              1294 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
kvm              1308 virt/kvm/arm/vgic/vgic-its.c 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
kvm              1310 virt/kvm/arm/vgic/vgic-its.c 	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
kvm              1315 virt/kvm/arm/vgic/vgic-its.c 		irq = vgic_get_irq(kvm, NULL, intids[i]);
kvm              1318 virt/kvm/arm/vgic/vgic-its.c 		update_lpi_config(kvm, irq, vcpu, false);
kvm              1319 virt/kvm/arm/vgic/vgic-its.c 		vgic_put_irq(kvm, irq);
kvm              1338 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
kvm              1348 virt/kvm/arm/vgic/vgic-its.c 	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
kvm              1349 virt/kvm/arm/vgic/vgic-its.c 	    target2_addr >= atomic_read(&kvm->online_vcpus))
kvm              1355 virt/kvm/arm/vgic/vgic-its.c 	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
kvm              1356 virt/kvm/arm/vgic/vgic-its.c 	vcpu2 = kvm_get_vcpu(kvm, target2_addr);
kvm              1358 virt/kvm/arm/vgic/vgic-its.c 	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
kvm              1363 virt/kvm/arm/vgic/vgic-its.c 		irq = vgic_get_irq(kvm, NULL, intids[i]);
kvm              1367 virt/kvm/arm/vgic/vgic-its.c 		vgic_put_irq(kvm, irq);
kvm              1370 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_invalidate_cache(kvm);
kvm              1380 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
kvm              1386 virt/kvm/arm/vgic/vgic-its.c 	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
kvm              1393 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
kvm              1401 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
kvm              1404 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
kvm              1407 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
kvm              1410 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
kvm              1413 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
kvm              1416 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
kvm              1419 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
kvm              1422 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
kvm              1425 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
kvm              1428 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
kvm              1431 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
kvm              1479 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
kvm              1486 virt/kvm/arm/vgic/vgic-its.c static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
kvm              1511 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
kvm              1523 virt/kvm/arm/vgic/vgic-its.c 		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
kvm              1533 virt/kvm/arm/vgic/vgic-its.c 			vgic_its_handle_command(kvm, its, cmd_buf);
kvm              1547 virt/kvm/arm/vgic/vgic-its.c static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
kvm              1566 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_process_commands(kvm, its);
kvm              1571 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
kvm              1578 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
kvm              1585 virt/kvm/arm/vgic/vgic-its.c static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
kvm              1613 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
kvm              1635 virt/kvm/arm/vgic/vgic-its.c static void vgic_mmio_write_its_baser(struct kvm *kvm,
kvm              1679 virt/kvm/arm/vgic/vgic-its.c 			vgic_its_free_device_list(kvm, its);
kvm              1682 virt/kvm/arm/vgic/vgic-its.c 			vgic_its_free_collection_list(kvm, its);
kvm              1689 virt/kvm/arm/vgic/vgic-its.c static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
kvm              1705 virt/kvm/arm/vgic/vgic-its.c static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
kvm              1723 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_invalidate_cache(kvm);
kvm              1729 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_process_commands(kvm, its);
kvm              1754 virt/kvm/arm/vgic/vgic-its.c static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
kvm              1796 virt/kvm/arm/vgic/vgic-its.c static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
kvm              1802 virt/kvm/arm/vgic/vgic-its.c 	mutex_lock(&kvm->slots_lock);
kvm              1816 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
kvm              1819 virt/kvm/arm/vgic/vgic-its.c 	mutex_unlock(&kvm->slots_lock);
kvm              1827 virt/kvm/arm/vgic/vgic-its.c void vgic_lpi_translation_cache_init(struct kvm *kvm)
kvm              1829 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm              1836 virt/kvm/arm/vgic/vgic-its.c 	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
kvm              1851 virt/kvm/arm/vgic/vgic-its.c void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
kvm              1853 virt/kvm/arm/vgic/vgic-its.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm              1856 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_invalidate_cache(kvm);
kvm              1887 virt/kvm/arm/vgic/vgic-its.c 	if (vgic_initialized(dev->kvm)) {
kvm              1888 virt/kvm/arm/vgic/vgic-its.c 		int ret = vgic_v4_init(dev->kvm);
kvm              1894 virt/kvm/arm/vgic/vgic-its.c 		vgic_lpi_translation_cache_init(dev->kvm);
kvm              1905 virt/kvm/arm/vgic/vgic-its.c 	dev->kvm->arch.vgic.msis_require_devid = true;
kvm              1906 virt/kvm/arm/vgic/vgic-its.c 	dev->kvm->arch.vgic.has_its = true;
kvm              1914 virt/kvm/arm/vgic/vgic-its.c 	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
kvm              1923 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = kvm_dev->kvm;
kvm              1928 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_free_device_list(kvm, its);
kvm              1929 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_free_collection_list(kvm, its);
kvm              1985 virt/kvm/arm/vgic/vgic-its.c 	mutex_lock(&dev->kvm->lock);
kvm              2000 virt/kvm/arm/vgic/vgic-its.c 	if (!lock_all_vcpus(dev->kvm)) {
kvm              2011 virt/kvm/arm/vgic/vgic-its.c 			ret = region->uaccess_its_write(dev->kvm, its, addr,
kvm              2014 virt/kvm/arm/vgic/vgic-its.c 			region->its_write(dev->kvm, its, addr, len, *reg);
kvm              2016 virt/kvm/arm/vgic/vgic-its.c 		*reg = region->its_read(dev->kvm, its, addr, len);
kvm              2018 virt/kvm/arm/vgic/vgic-its.c 	unlock_all_vcpus(dev->kvm);
kvm              2020 virt/kvm/arm/vgic/vgic-its.c 	mutex_unlock(&dev->kvm->lock);
kvm              2082 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = its->dev->kvm;
kvm              2095 virt/kvm/arm/vgic/vgic-its.c 		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
kvm              2117 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = its->dev->kvm;
kvm              2126 virt/kvm/arm/vgic/vgic-its.c 	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
kvm              2140 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = its->dev->kvm;
kvm              2175 virt/kvm/arm/vgic/vgic-its.c 		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
kvm              2177 virt/kvm/arm/vgic/vgic-its.c 	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
kvm              2262 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = its->dev->kvm;
kvm              2273 virt/kvm/arm/vgic/vgic-its.c 	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
kvm              2317 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_free_device(its->dev->kvm, dev);
kvm              2453 virt/kvm/arm/vgic/vgic-its.c 	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
kvm              2459 virt/kvm/arm/vgic/vgic-its.c 	struct kvm *kvm = its->dev->kvm;
kvm              2465 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
kvm              2476 virt/kvm/arm/vgic/vgic-its.c 	    target_addr >= atomic_read(&kvm->online_vcpus))
kvm              2525 virt/kvm/arm/vgic/vgic-its.c 	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
kvm              2611 virt/kvm/arm/vgic/vgic-its.c static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
kvm              2620 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_free_device_list(kvm, its);
kvm              2621 virt/kvm/arm/vgic/vgic-its.c 	vgic_its_free_collection_list(kvm, its);
kvm              2652 virt/kvm/arm/vgic/vgic-its.c static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
kvm              2660 virt/kvm/arm/vgic/vgic-its.c 	mutex_lock(&kvm->lock);
kvm              2663 virt/kvm/arm/vgic/vgic-its.c 	if (!lock_all_vcpus(kvm)) {
kvm              2665 virt/kvm/arm/vgic/vgic-its.c 		mutex_unlock(&kvm->lock);
kvm              2671 virt/kvm/arm/vgic/vgic-its.c 		vgic_its_reset(kvm, its);
kvm              2681 virt/kvm/arm/vgic/vgic-its.c 	unlock_all_vcpus(kvm);
kvm              2683 virt/kvm/arm/vgic/vgic-its.c 	mutex_unlock(&kvm->lock);
kvm              2705 virt/kvm/arm/vgic/vgic-its.c 		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
kvm              2710 virt/kvm/arm/vgic/vgic-its.c 		return vgic_register_its_iodev(dev->kvm, its, addr);
kvm              2713 virt/kvm/arm/vgic/vgic-its.c 		return vgic_its_ctrl(dev->kvm, its, attr->attr);
kvm                17 virt/kvm/arm/vgic/vgic-kvm-device.c int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
kvm                20 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (addr & ~kvm_phys_mask(kvm))
kvm                32 virt/kvm/arm/vgic/vgic-kvm-device.c static int vgic_check_type(struct kvm *kvm, int type_needed)
kvm                34 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (kvm->arch.vgic.vgic_model != type_needed)
kvm                56 virt/kvm/arm/vgic/vgic-kvm-device.c int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
kvm                59 virt/kvm/arm/vgic/vgic-kvm-device.c 	struct vgic_dist *vgic = &kvm->arch.vgic;
kvm                63 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_lock(&kvm->lock);
kvm                66 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
kvm                71 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
kvm                76 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
kvm                83 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
kvm                87 virt/kvm/arm/vgic/vgic-kvm-device.c 			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
kvm               103 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
kvm               119 virt/kvm/arm/vgic/vgic-kvm-device.c 				r = vgic_v3_set_redist_base(kvm, index,
kvm               124 virt/kvm/arm/vgic/vgic-kvm-device.c 		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
kvm               143 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
kvm               151 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_unlock(&kvm->lock);
kvm               169 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
kvm               191 virt/kvm/arm/vgic/vgic-kvm-device.c 		mutex_lock(&dev->kvm->lock);
kvm               193 virt/kvm/arm/vgic/vgic-kvm-device.c 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
kvm               196 virt/kvm/arm/vgic/vgic-kvm-device.c 			dev->kvm->arch.vgic.nr_spis =
kvm               199 virt/kvm/arm/vgic/vgic-kvm-device.c 		mutex_unlock(&dev->kvm->lock);
kvm               206 virt/kvm/arm/vgic/vgic-kvm-device.c 			mutex_lock(&dev->kvm->lock);
kvm               207 virt/kvm/arm/vgic/vgic-kvm-device.c 			r = vgic_init(dev->kvm);
kvm               208 virt/kvm/arm/vgic/vgic-kvm-device.c 			mutex_unlock(&dev->kvm->lock);
kvm               229 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
kvm               240 virt/kvm/arm/vgic/vgic-kvm-device.c 		r = put_user(dev->kvm->arch.vgic.nr_spis +
kvm               251 virt/kvm/arm/vgic/vgic-kvm-device.c 	return kvm_vgic_create(dev->kvm, type);
kvm               289 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
kvm               292 virt/kvm/arm/vgic/vgic-kvm-device.c 	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
kvm               299 virt/kvm/arm/vgic/vgic-kvm-device.c static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
kvm               304 virt/kvm/arm/vgic/vgic-kvm-device.c 		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
kvm               309 virt/kvm/arm/vgic/vgic-kvm-device.c void unlock_all_vcpus(struct kvm *kvm)
kvm               311 virt/kvm/arm/vgic/vgic-kvm-device.c 	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
kvm               315 virt/kvm/arm/vgic/vgic-kvm-device.c bool lock_all_vcpus(struct kvm *kvm)
kvm               326 virt/kvm/arm/vgic/vgic-kvm-device.c 	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
kvm               328 virt/kvm/arm/vgic/vgic-kvm-device.c 			unlock_vcpus(kvm, c - 1);
kvm               360 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_lock(&dev->kvm->lock);
kvm               362 virt/kvm/arm/vgic/vgic-kvm-device.c 	ret = vgic_init(dev->kvm);
kvm               366 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (!lock_all_vcpus(dev->kvm)) {
kvm               383 virt/kvm/arm/vgic/vgic-kvm-device.c 	unlock_all_vcpus(dev->kvm);
kvm               385 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_unlock(&dev->kvm->lock);
kvm               487 virt/kvm/arm/vgic/vgic-kvm-device.c 		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
kvm               489 virt/kvm/arm/vgic/vgic-kvm-device.c 		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
kvm               525 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_lock(&dev->kvm->lock);
kvm               527 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (unlikely(!vgic_initialized(dev->kvm))) {
kvm               532 virt/kvm/arm/vgic/vgic-kvm-device.c 	if (!lock_all_vcpus(dev->kvm)) {
kvm               582 virt/kvm/arm/vgic/vgic-kvm-device.c 	unlock_all_vcpus(dev->kvm);
kvm               584 virt/kvm/arm/vgic/vgic-kvm-device.c 	mutex_unlock(&dev->kvm->lock);
kvm               635 virt/kvm/arm/vgic/vgic-kvm-device.c 			mutex_lock(&dev->kvm->lock);
kvm               637 virt/kvm/arm/vgic/vgic-kvm-device.c 			if (!lock_all_vcpus(dev->kvm)) {
kvm               638 virt/kvm/arm/vgic/vgic-kvm-device.c 				mutex_unlock(&dev->kvm->lock);
kvm               641 virt/kvm/arm/vgic/vgic-kvm-device.c 			ret = vgic_v3_save_pending_tables(dev->kvm);
kvm               642 virt/kvm/arm/vgic/vgic-kvm-device.c 			unlock_all_vcpus(dev->kvm);
kvm               643 virt/kvm/arm/vgic/vgic-kvm-device.c 			mutex_unlock(&dev->kvm->lock);
kvm                28 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
kvm                38 virt/kvm/arm/vgic/vgic-mmio-v2.c 		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
kvm                56 virt/kvm/arm/vgic/vgic-mmio-v2.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
kvm                63 virt/kvm/arm/vgic/vgic-mmio-v2.c 			vgic_kick_vcpus(vcpu->kvm);
kvm                90 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
kvm               102 virt/kvm/arm/vgic/vgic-mmio-v2.c 	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
kvm               112 virt/kvm/arm/vgic/vgic-mmio-v2.c 	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
kvm               134 virt/kvm/arm/vgic/vgic-mmio-v2.c 	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
kvm               140 virt/kvm/arm/vgic/vgic-mmio-v2.c 		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
kvm               146 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
kvm               147 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(source_vcpu->kvm, irq);
kvm               159 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               163 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               174 virt/kvm/arm/vgic/vgic-mmio-v2.c 	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
kvm               183 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
kvm               190 virt/kvm/arm/vgic/vgic-mmio-v2.c 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
kvm               193 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               205 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               209 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               223 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               232 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               245 virt/kvm/arm/vgic/vgic-mmio-v2.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               253 virt/kvm/arm/vgic/vgic-mmio-v2.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               257 virt/kvm/arm/vgic/vgic-mmio-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm                39 virt/kvm/arm/vgic/vgic-mmio-v3.c bool vgic_has_its(struct kvm *kvm)
kvm                41 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm                49 virt/kvm/arm/vgic/vgic-mmio-v3.c bool vgic_supports_direct_msis(struct kvm *kvm)
kvm                51 virt/kvm/arm/vgic/vgic-mmio-v3.c 	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
kvm                64 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
kvm                76 virt/kvm/arm/vgic/vgic-mmio-v3.c 		if (vgic_has_its(vcpu->kvm)) {
kvm                99 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
kvm               107 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_kick_vcpus(vcpu->kvm);
kvm               133 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
kvm               143 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vgic_put_irq(vcpu->kvm, irq);
kvm               159 virt/kvm/arm/vgic/vgic-mmio-v3.c 	irq = vgic_get_irq(vcpu->kvm, NULL, intid);
kvm               168 virt/kvm/arm/vgic/vgic-mmio-v3.c 	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
kvm               171 virt/kvm/arm/vgic/vgic-mmio-v3.c 	vgic_put_irq(vcpu->kvm, irq);
kvm               190 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (!vgic_has_its(vcpu->kvm))
kvm               197 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_its_invalidate_cache(vcpu->kvm);
kvm               220 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (vgic_has_its(vcpu->kvm))
kvm               259 virt/kvm/arm/vgic/vgic-mmio-v3.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               264 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               279 virt/kvm/arm/vgic/vgic-mmio-v3.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               289 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               295 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               387 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
kvm               396 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
kvm               607 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm *kvm = vcpu->kvm;
kvm               608 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *vgic = &kvm->arch.vgic;
kvm               628 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (!vgic_v3_check_base(kvm))
kvm               642 virt/kvm/arm/vgic/vgic-mmio-v3.c 	mutex_lock(&kvm->slots_lock);
kvm               643 virt/kvm/arm/vgic/vgic-mmio-v3.c 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
kvm               645 virt/kvm/arm/vgic/vgic-mmio-v3.c 	mutex_unlock(&kvm->slots_lock);
kvm               658 virt/kvm/arm/vgic/vgic-mmio-v3.c 	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
kvm               661 virt/kvm/arm/vgic/vgic-mmio-v3.c static int vgic_register_all_redist_iodevs(struct kvm *kvm)
kvm               666 virt/kvm/arm/vgic/vgic-mmio-v3.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
kvm               674 virt/kvm/arm/vgic/vgic-mmio-v3.c 		mutex_lock(&kvm->slots_lock);
kvm               676 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vcpu = kvm_get_vcpu(kvm, c);
kvm               679 virt/kvm/arm/vgic/vgic-mmio-v3.c 		mutex_unlock(&kvm->slots_lock);
kvm               700 virt/kvm/arm/vgic/vgic-mmio-v3.c static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
kvm               703 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct vgic_dist *d = &kvm->arch.vgic;
kvm               737 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_dist_overlap(kvm, base, size))
kvm               741 virt/kvm/arm/vgic/vgic-mmio-v3.c 	if (vgic_v3_rdist_overlap(kvm, base, size))
kvm               750 virt/kvm/arm/vgic/vgic-mmio-v3.c 	ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
kvm               766 virt/kvm/arm/vgic/vgic-mmio-v3.c int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
kvm               770 virt/kvm/arm/vgic/vgic-mmio-v3.c 	ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
kvm               778 virt/kvm/arm/vgic/vgic-mmio-v3.c 	ret = vgic_register_all_redist_iodevs(kvm);
kvm               888 virt/kvm/arm/vgic/vgic-mmio-v3.c 	struct kvm *kvm = vcpu->kvm;
kvm               910 virt/kvm/arm/vgic/vgic-mmio-v3.c 	kvm_for_each_vcpu(c, c_vcpu, kvm) {
kvm               932 virt/kvm/arm/vgic/vgic-mmio-v3.c 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
kvm               943 virt/kvm/arm/vgic/vgic-mmio-v3.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               948 virt/kvm/arm/vgic/vgic-mmio-v3.c 		vgic_put_irq(vcpu->kvm, irq);
kvm                51 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm                56 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm                70 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm                74 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm                76 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm                93 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm                98 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               113 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               133 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               135 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               148 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               155 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               168 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               176 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               217 virt/kvm/arm/vgic/vgic-mmio.c 		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
kvm               230 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               234 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_put_irq(vcpu->kvm, irq);
kvm               243 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               244 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               283 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               287 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_put_irq(vcpu->kvm, irq);
kvm               299 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               321 virt/kvm/arm/vgic/vgic-mmio.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
kvm               323 virt/kvm/arm/vgic/vgic-mmio.c 		kvm_arm_halt_guest(vcpu->kvm);
kvm               329 virt/kvm/arm/vgic/vgic-mmio.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
kvm               331 virt/kvm/arm/vgic/vgic-mmio.c 		kvm_arm_resume_guest(vcpu->kvm);
kvm               343 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               352 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               364 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
kvm               370 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
kvm               403 virt/kvm/arm/vgic/vgic-mmio.c 		u32 model = vcpu->kvm->arch.vgic.vgic_model;
kvm               427 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               440 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               442 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               452 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
kvm               458 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
kvm               477 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               479 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               489 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&vcpu->kvm->lock);
kvm               495 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&vcpu->kvm->lock);
kvm               514 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               518 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               540 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               547 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               559 virt/kvm/arm/vgic/vgic-mmio.c 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               564 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               590 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               599 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               607 virt/kvm/arm/vgic/vgic-mmio.c 	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
kvm               615 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               619 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               629 virt/kvm/arm/vgic/vgic-mmio.c 	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
kvm               639 virt/kvm/arm/vgic/vgic-mmio.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
kvm               650 virt/kvm/arm/vgic/vgic-mmio.c 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               654 virt/kvm/arm/vgic/vgic-mmio.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               755 virt/kvm/arm/vgic/vgic-mmio.c static bool check_region(const struct kvm *kvm,
kvm               759 virt/kvm/arm/vgic/vgic-mmio.c 	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
kvm               794 virt/kvm/arm/vgic/vgic-mmio.c 	if (!region || !check_region(vcpu->kvm, region, addr, len))
kvm               877 virt/kvm/arm/vgic/vgic-mmio.c 		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
kvm               907 virt/kvm/arm/vgic/vgic-mmio.c 		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
kvm               919 virt/kvm/arm/vgic/vgic-mmio.c int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
kvm               922 virt/kvm/arm/vgic/vgic-mmio.c 	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
kvm               941 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_lock(&kvm->slots_lock);
kvm               942 virt/kvm/arm/vgic/vgic-mmio.c 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
kvm               944 virt/kvm/arm/vgic/vgic-mmio.c 	mutex_unlock(&kvm->slots_lock);
kvm                16 virt/kvm/arm/vgic/vgic-mmio.h 		unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
kvm                22 virt/kvm/arm/vgic/vgic-mmio.h 		void (*its_write)(struct kvm *kvm, struct vgic_its *its,
kvm                31 virt/kvm/arm/vgic/vgic-mmio.h 		int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
kvm               101 virt/kvm/arm/vgic/vgic-mmio.h int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
kvm                70 virt/kvm/arm/vgic/vgic-v2.c 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
kvm                71 virt/kvm/arm/vgic/vgic-v2.c 			kvm_notify_acked_irq(vcpu->kvm, 0,
kvm                74 virt/kvm/arm/vgic/vgic-v2.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
kvm               120 virt/kvm/arm/vgic/vgic-v2.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               304 virt/kvm/arm/vgic/vgic-v2.c int vgic_v2_map_resources(struct kvm *kvm)
kvm               306 virt/kvm/arm/vgic/vgic-v2.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               309 virt/kvm/arm/vgic/vgic-v2.c 	if (vgic_ready(kvm))
kvm               329 virt/kvm/arm/vgic/vgic-v2.c 	ret = vgic_init(kvm);
kvm               335 virt/kvm/arm/vgic/vgic-v2.c 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
kvm               342 virt/kvm/arm/vgic/vgic-v2.c 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm                35 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
kvm                59 virt/kvm/arm/vgic/vgic-v3.c 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
kvm                60 virt/kvm/arm/vgic/vgic-v3.c 			kvm_notify_acked_irq(vcpu->kvm, 0,
kvm                63 virt/kvm/arm/vgic/vgic-v3.c 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
kvm               111 virt/kvm/arm/vgic/vgic-v3.c 		vgic_put_irq(vcpu->kvm, irq);
kvm               120 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
kvm               208 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
kvm               238 virt/kvm/arm/vgic/vgic-v3.c 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
kvm               288 virt/kvm/arm/vgic/vgic-v3.c 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
kvm               314 virt/kvm/arm/vgic/vgic-v3.c int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
kvm               335 virt/kvm/arm/vgic/vgic-v3.c 	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
kvm               347 virt/kvm/arm/vgic/vgic-v3.c 	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
kvm               352 virt/kvm/arm/vgic/vgic-v3.c 		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
kvm               363 virt/kvm/arm/vgic/vgic-v3.c int vgic_v3_save_pending_tables(struct kvm *kvm)
kvm               365 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               388 virt/kvm/arm/vgic/vgic-v3.c 			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
kvm               403 virt/kvm/arm/vgic/vgic-v3.c 		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
kvm               420 virt/kvm/arm/vgic/vgic-v3.c bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
kvm               422 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_dist *d = &kvm->arch.vgic;
kvm               427 virt/kvm/arm/vgic/vgic-v3.c 			(base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
kvm               437 virt/kvm/arm/vgic/vgic-v3.c bool vgic_v3_check_base(struct kvm *kvm)
kvm               439 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_dist *d = &kvm->arch.vgic;
kvm               447 virt/kvm/arm/vgic/vgic-v3.c 		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
kvm               455 virt/kvm/arm/vgic/vgic-v3.c 	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
kvm               482 virt/kvm/arm/vgic/vgic-v3.c struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
kvm               485 virt/kvm/arm/vgic/vgic-v3.c 	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
kvm               496 virt/kvm/arm/vgic/vgic-v3.c int vgic_v3_map_resources(struct kvm *kvm)
kvm               498 virt/kvm/arm/vgic/vgic-v3.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               503 virt/kvm/arm/vgic/vgic-v3.c 	if (vgic_ready(kvm))
kvm               506 virt/kvm/arm/vgic/vgic-v3.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
kvm               522 virt/kvm/arm/vgic/vgic-v3.c 	if (!vgic_v3_check_base(kvm)) {
kvm               532 virt/kvm/arm/vgic/vgic-v3.c 	if (!vgic_initialized(kvm)) {
kvm               537 virt/kvm/arm/vgic/vgic-v3.c 	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
kvm               104 virt/kvm/arm/vgic/vgic-v4.c int vgic_v4_init(struct kvm *kvm)
kvm               106 virt/kvm/arm/vgic/vgic-v4.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               116 virt/kvm/arm/vgic/vgic-v4.c 	nr_vcpus = atomic_read(&kvm->online_vcpus);
kvm               125 virt/kvm/arm/vgic/vgic-v4.c 	kvm_for_each_vcpu(i, vcpu, kvm)
kvm               137 virt/kvm/arm/vgic/vgic-v4.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               162 virt/kvm/arm/vgic/vgic-v4.c 		vgic_v4_teardown(kvm);
kvm               173 virt/kvm/arm/vgic/vgic-v4.c void vgic_v4_teardown(struct kvm *kvm)
kvm               175 virt/kvm/arm/vgic/vgic-v4.c 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
kvm               182 virt/kvm/arm/vgic/vgic-v4.c 		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
kvm               197 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(vcpu->kvm))
kvm               208 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(vcpu->kvm))
kvm               234 virt/kvm/arm/vgic/vgic-v4.c static struct vgic_its *vgic_get_its(struct kvm *kvm,
kvm               245 virt/kvm/arm/vgic/vgic-v4.c 	return vgic_msi_to_its(kvm, &msi);
kvm               248 virt/kvm/arm/vgic/vgic-v4.c int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
kvm               256 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(kvm))
kvm               263 virt/kvm/arm/vgic/vgic-v4.c 	its = vgic_get_its(kvm, irq_entry);
kvm               270 virt/kvm/arm/vgic/vgic-v4.c 	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
kvm               282 virt/kvm/arm/vgic/vgic-v4.c 		.vm		= &kvm->arch.vgic.its_vm,
kvm               303 virt/kvm/arm/vgic/vgic-v4.c int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
kvm               310 virt/kvm/arm/vgic/vgic-v4.c 	if (!vgic_supports_direct_msis(kvm))
kvm               317 virt/kvm/arm/vgic/vgic-v4.c 	its = vgic_get_its(kvm, irq_entry);
kvm               323 virt/kvm/arm/vgic/vgic-v4.c 	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
kvm               341 virt/kvm/arm/vgic/vgic-v4.c 	if (vgic_supports_direct_msis(vcpu->kvm)) {
kvm               350 virt/kvm/arm/vgic/vgic-v4.c 	if (vgic_supports_direct_msis(vcpu->kvm)) {
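
kvm_vgic_v4_set_forwarding() and its unset counterpart both gate on vgic_supports_direct_msis() and then resolve the MSI's (devid, eventid) pair to a struct vgic_irq before touching hardware state. A sketch of that common prologue; treating every failed lookup as "fall back to software injection" is an assumption about the caller's policy:

        struct vgic_its *its;
        struct vgic_irq *irq;
        int ret;

        if (!vgic_supports_direct_msis(kvm))
                return 0;                       /* no GICv4: software delivery only */

        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;                       /* MSI does not target a known ITS */

        ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                   irq_entry->msi.data, &irq);
        if (ret)
                return 0;                       /* no LPI mapped for this event yet */
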
kvm                58 virt/kvm/arm/vgic/vgic.c static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
kvm                60 virt/kvm/arm/vgic/vgic.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm                90 virt/kvm/arm/vgic/vgic.c struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
kvm               100 virt/kvm/arm/vgic/vgic.c 	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
kvm               101 virt/kvm/arm/vgic/vgic.c 		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
kvm               102 virt/kvm/arm/vgic/vgic.c 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
kvm               107 virt/kvm/arm/vgic/vgic.c 		return vgic_get_lpi(kvm, intid);
kvm               125 virt/kvm/arm/vgic/vgic.c void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
kvm               127 virt/kvm/arm/vgic/vgic.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               138 virt/kvm/arm/vgic/vgic.c void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
kvm               140 virt/kvm/arm/vgic/vgic.c 	struct vgic_dist *dist = &kvm->arch.vgic;
kvm               147 virt/kvm/arm/vgic/vgic.c 	__vgic_put_lpi_locked(kvm, irq);
kvm               165 virt/kvm/arm/vgic/vgic.c 			vgic_put_irq(vcpu->kvm, irq);
kvm               231 virt/kvm/arm/vgic/vgic.c 			     !irq->target_vcpu->kvm->arch.vgic.enabled))
kvm               334 virt/kvm/arm/vgic/vgic.c bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
kvm               437 virt/kvm/arm/vgic/vgic.c int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
kvm               447 virt/kvm/arm/vgic/vgic.c 	ret = vgic_lazy_init(kvm);
kvm               451 virt/kvm/arm/vgic/vgic.c 	vcpu = kvm_get_vcpu(kvm, cpuid);
kvm               455 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(kvm, vcpu, intid);
kvm               464 virt/kvm/arm/vgic/vgic.c 		vgic_put_irq(kvm, irq);
kvm               473 virt/kvm/arm/vgic/vgic.c 	vgic_queue_irq_unlock(kvm, irq, flags);
kvm               474 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(kvm, irq);
kvm               517 virt/kvm/arm/vgic/vgic.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
kvm               526 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
kvm               542 virt/kvm/arm/vgic/vgic.c 	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
kvm               554 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
kvm               562 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
kvm               565 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
kvm               571 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
kvm               592 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
kvm               596 virt/kvm/arm/vgic/vgic.c 	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
kvm               599 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
kvm               654 virt/kvm/arm/vgic/vgic.c 			vgic_put_irq(vcpu->kvm, irq);
kvm               900 virt/kvm/arm/vgic/vgic.c 	    !vgic_supports_direct_msis(vcpu->kvm))
kvm               917 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!vgic_initialized(vcpu->kvm)))
kvm               928 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!vgic_initialized(vcpu->kvm)))
kvm               939 virt/kvm/arm/vgic/vgic.c 	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm               956 virt/kvm/arm/vgic/vgic.c 	if (!vcpu->kvm->arch.vgic.enabled)
kvm               982 virt/kvm/arm/vgic/vgic.c void vgic_kick_vcpus(struct kvm *kvm)
kvm               991 virt/kvm/arm/vgic/vgic.c 	kvm_for_each_vcpu(c, vcpu, kvm) {
kvm              1005 virt/kvm/arm/vgic/vgic.c 	if (!vgic_initialized(vcpu->kvm))
kvm              1008 virt/kvm/arm/vgic/vgic.c 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
kvm              1012 virt/kvm/arm/vgic/vgic.c 	vgic_put_irq(vcpu->kvm, irq);
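
Nearly every vgic.c path above brackets its work between vgic_get_irq() and vgic_put_irq(), because LPI descriptors are refcounted and may be freed once unmapped. The canonical usage, as a sketch:

        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

        if (!irq)
                return -EINVAL;         /* intid out of range / LPI not mapped */

        /* ... inspect or modify irq, taking irq->irq_lock where required ... */

        vgic_put_irq(vcpu->kvm, irq);   /* drop the reference taken above */
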
kvm               162 virt/kvm/arm/vgic/vgic.h struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
kvm               164 virt/kvm/arm/vgic/vgic.h void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
kvm               165 virt/kvm/arm/vgic/vgic.h void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
kvm               169 virt/kvm/arm/vgic/vgic.h bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
kvm               171 virt/kvm/arm/vgic/vgic.h void vgic_kick_vcpus(struct kvm *kvm);
kvm               173 virt/kvm/arm/vgic/vgic.h int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
kvm               190 virt/kvm/arm/vgic/vgic.h int vgic_v2_map_resources(struct kvm *kvm);
kvm               191 virt/kvm/arm/vgic/vgic.h int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
kvm               219 virt/kvm/arm/vgic/vgic.h int vgic_v3_map_resources(struct kvm *kvm);
kvm               220 virt/kvm/arm/vgic/vgic.h int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
kvm               221 virt/kvm/arm/vgic/vgic.h int vgic_v3_save_pending_tables(struct kvm *kvm);
kvm               222 virt/kvm/arm/vgic/vgic.h int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
kvm               224 virt/kvm/arm/vgic/vgic.h bool vgic_v3_check_base(struct kvm *kvm);
kvm               230 virt/kvm/arm/vgic/vgic.h bool vgic_has_its(struct kvm *kvm);
kvm               234 virt/kvm/arm/vgic/vgic.h int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
kvm               249 virt/kvm/arm/vgic/vgic.h int vgic_lazy_init(struct kvm *kvm);
kvm               250 virt/kvm/arm/vgic/vgic.h int vgic_init(struct kvm *kvm);
kvm               252 virt/kvm/arm/vgic/vgic.h void vgic_debug_init(struct kvm *kvm);
kvm               253 virt/kvm/arm/vgic/vgic.h void vgic_debug_destroy(struct kvm *kvm);
kvm               255 virt/kvm/arm/vgic/vgic.h bool lock_all_vcpus(struct kvm *kvm);
kvm               256 virt/kvm/arm/vgic/vgic.h void unlock_all_vcpus(struct kvm *kvm);
kvm               286 virt/kvm/arm/vgic/vgic.h vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
kvm               289 virt/kvm/arm/vgic/vgic.h 		return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
kvm               294 virt/kvm/arm/vgic/vgic.h struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
kvm               297 virt/kvm/arm/vgic/vgic.h bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
kvm               299 virt/kvm/arm/vgic/vgic.h static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
kvm               301 virt/kvm/arm/vgic/vgic.h 	struct vgic_dist *d = &kvm->arch.vgic;
kvm               307 virt/kvm/arm/vgic/vgic.h int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
kvm               308 virt/kvm/arm/vgic/vgic.h int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
kvm               310 virt/kvm/arm/vgic/vgic.h struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
kvm               311 virt/kvm/arm/vgic/vgic.h int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
kvm               312 virt/kvm/arm/vgic/vgic.h void vgic_lpi_translation_cache_init(struct kvm *kvm);
kvm               313 virt/kvm/arm/vgic/vgic.h void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
kvm               314 virt/kvm/arm/vgic/vgic.h void vgic_its_invalidate_cache(struct kvm *kvm);
kvm               316 virt/kvm/arm/vgic/vgic.h bool vgic_supports_direct_msis(struct kvm *kvm);
kvm               317 virt/kvm/arm/vgic/vgic.h int vgic_v4_init(struct kvm *kvm);
kvm               318 virt/kvm/arm/vgic/vgic.h void vgic_v4_teardown(struct kvm *kvm);
kvm               101 virt/kvm/async_pf.c 	kvm_put_kvm(vcpu->kvm);
kvm               128 virt/kvm/async_pf.c 			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kvm               193 virt/kvm/async_pf.c 	kvm_get_kvm(work->vcpu->kvm);
kvm               209 virt/kvm/async_pf.c 	kvm_put_kvm(work->vcpu->kvm);
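
async_pf.c pins the VM with kvm_get_kvm() for as long as an asynchronous page-fault work item is in flight and drops it with kvm_put_kvm() on completion or cancellation, so the final put may tear the VM down from the worker. The lifetime rule in outline, with the actual queueing step elided:

        kvm_get_kvm(work->vcpu->kvm);   /* VM must outlive the queued work */
        /* ... queue the work item ... */

        /* later, when the work runs or is cancelled: */
        kvm_put_kvm(work->vcpu->kvm);   /* may be the last reference */
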
kvm                54 virt/kvm/coalesced_mmio.c 	ring = dev->kvm->coalesced_mmio_ring;
kvm                69 virt/kvm/coalesced_mmio.c 	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
kvm                75 virt/kvm/coalesced_mmio.c 	spin_lock(&dev->kvm->ring_lock);
kvm                80 virt/kvm/coalesced_mmio.c 		spin_unlock(&dev->kvm->ring_lock);
kvm                92 virt/kvm/coalesced_mmio.c 	spin_unlock(&dev->kvm->ring_lock);
kvm               110 virt/kvm/coalesced_mmio.c int kvm_coalesced_mmio_init(struct kvm *kvm)
kvm               121 virt/kvm/coalesced_mmio.c 	kvm->coalesced_mmio_ring = page_address(page);
kvm               128 virt/kvm/coalesced_mmio.c 	spin_lock_init(&kvm->ring_lock);
kvm               129 virt/kvm/coalesced_mmio.c 	INIT_LIST_HEAD(&kvm->coalesced_zones);
kvm               135 virt/kvm/coalesced_mmio.c void kvm_coalesced_mmio_free(struct kvm *kvm)
kvm               137 virt/kvm/coalesced_mmio.c 	if (kvm->coalesced_mmio_ring)
kvm               138 virt/kvm/coalesced_mmio.c 		free_page((unsigned long)kvm->coalesced_mmio_ring);
kvm               141 virt/kvm/coalesced_mmio.c int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
kvm               156 virt/kvm/coalesced_mmio.c 	dev->kvm = kvm;
kvm               159 virt/kvm/coalesced_mmio.c 	mutex_lock(&kvm->slots_lock);
kvm               160 virt/kvm/coalesced_mmio.c 	ret = kvm_io_bus_register_dev(kvm,
kvm               165 virt/kvm/coalesced_mmio.c 	list_add_tail(&dev->list, &kvm->coalesced_zones);
kvm               166 virt/kvm/coalesced_mmio.c 	mutex_unlock(&kvm->slots_lock);
kvm               171 virt/kvm/coalesced_mmio.c 	mutex_unlock(&kvm->slots_lock);
kvm               177 virt/kvm/coalesced_mmio.c int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
kvm               185 virt/kvm/coalesced_mmio.c 	mutex_lock(&kvm->slots_lock);
kvm               187 virt/kvm/coalesced_mmio.c 	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
kvm               190 virt/kvm/coalesced_mmio.c 			kvm_io_bus_unregister_dev(kvm,
kvm               195 virt/kvm/coalesced_mmio.c 	mutex_unlock(&kvm->slots_lock);
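
coalesced_mmio.c appends write records to a single shared page, serialized by kvm->ring_lock and published with a write barrier so userspace never observes ring->last ahead of the entry data. A sketch of one producer step, keeping the usual one-slot-empty ring-full test:

        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        spin_lock(&dev->kvm->ring_lock);
        insert = ring->last;
        if ((insert + 1) % KVM_COALESCED_MMIO_MAX == ring->first) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;             /* full: let the VM exit instead */
        }
        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        smp_wmb();                              /* entry visible before index moves */
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
        spin_unlock(&dev->kvm->ring_lock);
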
kvm                21 virt/kvm/coalesced_mmio.h 	struct kvm *kvm;
kvm                25 virt/kvm/coalesced_mmio.h int kvm_coalesced_mmio_init(struct kvm *kvm);
kvm                26 virt/kvm/coalesced_mmio.h void kvm_coalesced_mmio_free(struct kvm *kvm);
kvm                27 virt/kvm/coalesced_mmio.h int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
kvm                29 virt/kvm/coalesced_mmio.h int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
kvm                34 virt/kvm/coalesced_mmio.h static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
kvm                35 virt/kvm/coalesced_mmio.h static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }
kvm                36 virt/kvm/eventfd.c kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
kvm                46 virt/kvm/eventfd.c 	struct kvm *kvm = irqfd->kvm;
kvm                49 virt/kvm/eventfd.c 		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
kvm                51 virt/kvm/eventfd.c 		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
kvm                54 virt/kvm/eventfd.c 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
kvm                67 virt/kvm/eventfd.c 	struct kvm *kvm;
kvm                73 virt/kvm/eventfd.c 	kvm = resampler->kvm;
kvm                75 virt/kvm/eventfd.c 	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
kvm                78 virt/kvm/eventfd.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm                83 virt/kvm/eventfd.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm                90 virt/kvm/eventfd.c 	struct kvm *kvm = resampler->kvm;
kvm                92 virt/kvm/eventfd.c 	mutex_lock(&kvm->irqfds.resampler_lock);
kvm                95 virt/kvm/eventfd.c 	synchronize_srcu(&kvm->irq_srcu);
kvm                99 virt/kvm/eventfd.c 		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
kvm               100 virt/kvm/eventfd.c 		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
kvm               105 virt/kvm/eventfd.c 	mutex_unlock(&kvm->irqfds.resampler_lock);
kvm               116 virt/kvm/eventfd.c 	struct kvm *kvm = irqfd->kvm;
kvm               120 virt/kvm/eventfd.c 	synchronize_srcu(&kvm->irq_srcu);
kvm               174 virt/kvm/eventfd.c 				struct kvm *kvm, int irq_source_id,
kvm               191 virt/kvm/eventfd.c 	struct kvm *kvm = irqfd->kvm;
kvm               196 virt/kvm/eventfd.c 		idx = srcu_read_lock(&kvm->irq_srcu);
kvm               202 virt/kvm/eventfd.c 		if (kvm_arch_set_irq_inatomic(&irq, kvm,
kvm               206 virt/kvm/eventfd.c 		srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               213 virt/kvm/eventfd.c 		spin_lock_irqsave(&kvm->irqfds.lock, iflags);
kvm               227 virt/kvm/eventfd.c 		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
kvm               243 virt/kvm/eventfd.c static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
kvm               249 virt/kvm/eventfd.c 	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
kvm               274 virt/kvm/eventfd.c 				struct kvm *kvm, unsigned int host_irq,
kvm               282 virt/kvm/eventfd.c kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
kvm               291 virt/kvm/eventfd.c 	if (!kvm_arch_intc_initialized(kvm))
kvm               294 virt/kvm/eventfd.c 	if (!kvm_arch_irqfd_allowed(kvm, args))
kvm               301 virt/kvm/eventfd.c 	irqfd->kvm = kvm;
kvm               334 virt/kvm/eventfd.c 		mutex_lock(&kvm->irqfds.resampler_lock);
kvm               337 virt/kvm/eventfd.c 				    &kvm->irqfds.resampler_list, link) {
kvm               349 virt/kvm/eventfd.c 				mutex_unlock(&kvm->irqfds.resampler_lock);
kvm               353 virt/kvm/eventfd.c 			resampler->kvm = kvm;
kvm               359 virt/kvm/eventfd.c 			list_add(&resampler->link, &kvm->irqfds.resampler_list);
kvm               360 virt/kvm/eventfd.c 			kvm_register_irq_ack_notifier(kvm,
kvm               366 virt/kvm/eventfd.c 		synchronize_srcu(&kvm->irq_srcu);
kvm               368 virt/kvm/eventfd.c 		mutex_unlock(&kvm->irqfds.resampler_lock);
kvm               378 virt/kvm/eventfd.c 	spin_lock_irq(&kvm->irqfds.lock);
kvm               381 virt/kvm/eventfd.c 	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
kvm               386 virt/kvm/eventfd.c 		spin_unlock_irq(&kvm->irqfds.lock);
kvm               390 virt/kvm/eventfd.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               391 virt/kvm/eventfd.c 	irqfd_update(kvm, irqfd);
kvm               393 virt/kvm/eventfd.c 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
kvm               395 virt/kvm/eventfd.c 	spin_unlock_irq(&kvm->irqfds.lock);
kvm               420 virt/kvm/eventfd.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               446 virt/kvm/eventfd.c bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
kvm               451 virt/kvm/eventfd.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               452 virt/kvm/eventfd.c 	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
kvm               454 virt/kvm/eventfd.c 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
kvm               457 virt/kvm/eventfd.c 				srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               461 virt/kvm/eventfd.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               467 virt/kvm/eventfd.c void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
kvm               471 virt/kvm/eventfd.c 	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
kvm               477 virt/kvm/eventfd.c void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
kvm               483 virt/kvm/eventfd.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm               484 virt/kvm/eventfd.c 	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
kvm               486 virt/kvm/eventfd.c 		kvm_notify_acked_gsi(kvm, gsi);
kvm               487 virt/kvm/eventfd.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm               490 virt/kvm/eventfd.c void kvm_register_irq_ack_notifier(struct kvm *kvm,
kvm               493 virt/kvm/eventfd.c 	mutex_lock(&kvm->irq_lock);
kvm               494 virt/kvm/eventfd.c 	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
kvm               495 virt/kvm/eventfd.c 	mutex_unlock(&kvm->irq_lock);
kvm               496 virt/kvm/eventfd.c 	kvm_arch_post_irq_ack_notifier_list_update(kvm);
kvm               499 virt/kvm/eventfd.c void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
kvm               502 virt/kvm/eventfd.c 	mutex_lock(&kvm->irq_lock);
kvm               504 virt/kvm/eventfd.c 	mutex_unlock(&kvm->irq_lock);
kvm               505 virt/kvm/eventfd.c 	synchronize_srcu(&kvm->irq_srcu);
kvm               506 virt/kvm/eventfd.c 	kvm_arch_post_irq_ack_notifier_list_update(kvm);
kvm               511 virt/kvm/eventfd.c kvm_eventfd_init(struct kvm *kvm)
kvm               514 virt/kvm/eventfd.c 	spin_lock_init(&kvm->irqfds.lock);
kvm               515 virt/kvm/eventfd.c 	INIT_LIST_HEAD(&kvm->irqfds.items);
kvm               516 virt/kvm/eventfd.c 	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
kvm               517 virt/kvm/eventfd.c 	mutex_init(&kvm->irqfds.resampler_lock);
kvm               519 virt/kvm/eventfd.c 	INIT_LIST_HEAD(&kvm->ioeventfds);
kvm               527 virt/kvm/eventfd.c kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
kvm               536 virt/kvm/eventfd.c 	spin_lock_irq(&kvm->irqfds.lock);
kvm               538 virt/kvm/eventfd.c 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
kvm               553 virt/kvm/eventfd.c 	spin_unlock_irq(&kvm->irqfds.lock);
kvm               567 virt/kvm/eventfd.c kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
kvm               573 virt/kvm/eventfd.c 		return kvm_irqfd_deassign(kvm, args);
kvm               575 virt/kvm/eventfd.c 	return kvm_irqfd_assign(kvm, args);
kvm               583 virt/kvm/eventfd.c kvm_irqfd_release(struct kvm *kvm)
kvm               587 virt/kvm/eventfd.c 	spin_lock_irq(&kvm->irqfds.lock);
kvm               589 virt/kvm/eventfd.c 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
kvm               592 virt/kvm/eventfd.c 	spin_unlock_irq(&kvm->irqfds.lock);
kvm               606 virt/kvm/eventfd.c void kvm_irq_routing_update(struct kvm *kvm)
kvm               610 virt/kvm/eventfd.c 	spin_lock_irq(&kvm->irqfds.lock);
kvm               612 virt/kvm/eventfd.c 	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
kvm               613 virt/kvm/eventfd.c 		irqfd_update(kvm, irqfd);
kvm               618 virt/kvm/eventfd.c 					irqfd->kvm, irqfd->producer->irq,
kvm               625 virt/kvm/eventfd.c 	spin_unlock_irq(&kvm->irqfds.lock);
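
irqfd_wakeup() can run from wait-queue (possibly hard-IRQ) context, so it reads its routing snapshot under kvm->irq_srcu and only attempts an injection that cannot sleep, punting to a workqueue when the arch helper returns -EWOULDBLOCK. A condensed sketch; the seqcount retry loop that guards the irq_entry snapshot is omitted here:

        struct kvm_kernel_irq_routing_entry irq;
        int idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        irq = irqfd->irq_entry;         /* snapshot kept fresh by irqfd_update() */
        if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                      1, false) == -EWOULDBLOCK)
                schedule_work(&irqfd->inject);  /* sleepable injection path */
        srcu_read_unlock(&kvm->irq_srcu, idx);
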
kvm               760 virt/kvm/eventfd.c ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
kvm               764 virt/kvm/eventfd.c 	list_for_each_entry(_p, &kvm->ioeventfds, list)
kvm               785 virt/kvm/eventfd.c static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
kvm               816 virt/kvm/eventfd.c 	mutex_lock(&kvm->slots_lock);
kvm               819 virt/kvm/eventfd.c 	if (ioeventfd_check_collision(kvm, p)) {
kvm               826 virt/kvm/eventfd.c 	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
kvm               831 virt/kvm/eventfd.c 	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
kvm               832 virt/kvm/eventfd.c 	list_add_tail(&p->list, &kvm->ioeventfds);
kvm               834 virt/kvm/eventfd.c 	mutex_unlock(&kvm->slots_lock);
kvm               839 virt/kvm/eventfd.c 	mutex_unlock(&kvm->slots_lock);
kvm               849 virt/kvm/eventfd.c kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
kvm               861 virt/kvm/eventfd.c 	mutex_lock(&kvm->slots_lock);
kvm               863 virt/kvm/eventfd.c 	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
kvm               876 virt/kvm/eventfd.c 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
kvm               877 virt/kvm/eventfd.c 		bus = kvm_get_bus(kvm, bus_idx);
kvm               885 virt/kvm/eventfd.c 	mutex_unlock(&kvm->slots_lock);
kvm               892 virt/kvm/eventfd.c static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm               895 virt/kvm/eventfd.c 	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
kvm               898 virt/kvm/eventfd.c 		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
kvm               904 virt/kvm/eventfd.c kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm               934 virt/kvm/eventfd.c 	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
kvm               942 virt/kvm/eventfd.c 		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
kvm               950 virt/kvm/eventfd.c 	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
kvm               956 virt/kvm/eventfd.c kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm               959 virt/kvm/eventfd.c 		return kvm_deassign_ioeventfd(kvm, args);
kvm               961 virt/kvm/eventfd.c 	return kvm_assign_ioeventfd(kvm, args);
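
From userspace, the registration path above is driven by the KVM_IOEVENTFD ioctl: the kernel then signals the eventfd on matching guest writes instead of exiting to the VMM. A minimal sketch, assuming vm_fd is an open VM file descriptor and port 0xf4 is a hypothetical choice:

        #include <err.h>
        #include <sys/eventfd.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_ioeventfd ioev = {
                .addr  = 0xf4,                          /* hypothetical PIO address */
                .len   = 4,
                .fd    = efd,
                .flags = KVM_IOEVENTFD_FLAG_PIO,        /* omit for MMIO addresses */
        };

        if (ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0)
                err(1, "KVM_IOEVENTFD");
        /* now poll efd instead of taking a vmexit per guest write */
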
kvm                22 virt/kvm/irqchip.c int kvm_irq_map_gsi(struct kvm *kvm,
kvm                29 virt/kvm/irqchip.c 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
kvm                30 virt/kvm/irqchip.c 					lockdep_is_held(&kvm->irq_lock));
kvm                41 virt/kvm/irqchip.c int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
kvm                45 virt/kvm/irqchip.c 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
kvm                49 virt/kvm/irqchip.c int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
kvm                53 virt/kvm/irqchip.c 	if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
kvm                62 virt/kvm/irqchip.c 	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
kvm                71 virt/kvm/irqchip.c int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
kvm                83 virt/kvm/irqchip.c 	idx = srcu_read_lock(&kvm->irq_srcu);
kvm                84 virt/kvm/irqchip.c 	i = kvm_irq_map_gsi(kvm, irq_set, irq);
kvm                85 virt/kvm/irqchip.c 	srcu_read_unlock(&kvm->irq_srcu, idx);
kvm                89 virt/kvm/irqchip.c 		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
kvm               120 virt/kvm/irqchip.c void kvm_free_irq_routing(struct kvm *kvm)
kvm               124 virt/kvm/irqchip.c 	struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
kvm               128 virt/kvm/irqchip.c static int setup_routing_entry(struct kvm *kvm,
kvm               149 virt/kvm/irqchip.c 	r = kvm_set_routing_entry(kvm, e, ue);
kvm               160 virt/kvm/irqchip.c void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
kvm               164 virt/kvm/irqchip.c bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
kvm               169 virt/kvm/irqchip.c int kvm_set_irq_routing(struct kvm *kvm,
kvm               213 virt/kvm/irqchip.c 		r = setup_routing_entry(kvm, new, e, ue);
kvm               219 virt/kvm/irqchip.c 	mutex_lock(&kvm->irq_lock);
kvm               220 virt/kvm/irqchip.c 	old = rcu_dereference_protected(kvm->irq_routing, 1);
kvm               221 virt/kvm/irqchip.c 	rcu_assign_pointer(kvm->irq_routing, new);
kvm               222 virt/kvm/irqchip.c 	kvm_irq_routing_update(kvm);
kvm               223 virt/kvm/irqchip.c 	kvm_arch_irq_routing_update(kvm);
kvm               224 virt/kvm/irqchip.c 	mutex_unlock(&kvm->irq_lock);
kvm               226 virt/kvm/irqchip.c 	kvm_arch_post_irq_routing_update(kvm);
kvm               228 virt/kvm/irqchip.c 	synchronize_srcu_expedited(&kvm->irq_srcu);
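
kvm_set_irq_routing() is a publish-then-quiesce update: the new table is installed with rcu_assign_pointer() while kvm->irq_lock excludes other writers, and the old table is freed only after synchronize_srcu_expedited() guarantees no reader inside kvm->irq_srcu can still see it. Condensed, with the final free shown as a plain kfree() for brevity:

        mutex_lock(&kvm->irq_lock);
        old = rcu_dereference_protected(kvm->irq_routing, 1);
        rcu_assign_pointer(kvm->irq_routing, new);
        kvm_irq_routing_update(kvm);            /* refresh irqfd snapshots */
        mutex_unlock(&kvm->irq_lock);

        synchronize_srcu_expedited(&kvm->irq_srcu);
        kfree(old);                             /* no SRCU reader can hold it now */
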
kvm               156 virt/kvm/kvm_main.c static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
kvm               160 virt/kvm/kvm_main.c __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm               248 virt/kvm/kvm_main.c bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
kvm               257 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, kvm) {
kvm               278 virt/kvm/kvm_main.c bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
kvm               285 virt/kvm/kvm_main.c 	called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
kvm               292 virt/kvm/kvm_main.c void kvm_flush_remote_tlbs(struct kvm *kvm)
kvm               298 virt/kvm/kvm_main.c 	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
kvm               311 virt/kvm/kvm_main.c 	if (!kvm_arch_flush_remote_tlb(kvm)
kvm               312 virt/kvm/kvm_main.c 	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
kvm               313 virt/kvm/kvm_main.c 		++kvm->stat.remote_tlb_flush;
kvm               314 virt/kvm/kvm_main.c 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
kvm               319 virt/kvm/kvm_main.c void kvm_reload_remote_mmus(struct kvm *kvm)
kvm               321 virt/kvm/kvm_main.c 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
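
The tlbs_dirty handling in kvm_flush_remote_tlbs() is easy to misread: the count is sampled with acquire semantics before the flush and cleared afterwards only via cmpxchg(), so a writer that dirtied more TLB state mid-flush leaves a nonzero count behind and forces the next flush rather than being lost. In outline:

        long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

        /* ... kick vcpus / perform the remote flush ... */

        /* Clear only if nobody raced in; otherwise their update survives. */
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
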
kvm               324 virt/kvm/kvm_main.c int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
kvm               331 virt/kvm/kvm_main.c 	vcpu->kvm = kvm;
kvm               378 virt/kvm/kvm_main.c static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
kvm               380 virt/kvm/kvm_main.c 	return container_of(mn, struct kvm, mmu_notifier);
kvm               387 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               390 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               391 virt/kvm/kvm_main.c 	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
kvm               392 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               400 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               403 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               404 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               405 virt/kvm/kvm_main.c 	kvm->mmu_notifier_seq++;
kvm               407 virt/kvm/kvm_main.c 	if (kvm_set_spte_hva(kvm, address, pte))
kvm               408 virt/kvm/kvm_main.c 		kvm_flush_remote_tlbs(kvm);
kvm               410 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               411 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               417 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               420 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               421 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               427 virt/kvm/kvm_main.c 	kvm->mmu_notifier_count++;
kvm               428 virt/kvm/kvm_main.c 	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
kvm               429 virt/kvm/kvm_main.c 	need_tlb_flush |= kvm->tlbs_dirty;
kvm               432 virt/kvm/kvm_main.c 		kvm_flush_remote_tlbs(kvm);
kvm               434 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               435 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               443 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               445 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               451 virt/kvm/kvm_main.c 	kvm->mmu_notifier_seq++;
kvm               458 virt/kvm/kvm_main.c 	kvm->mmu_notifier_count--;
kvm               459 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               461 virt/kvm/kvm_main.c 	BUG_ON(kvm->mmu_notifier_count < 0);
kvm               469 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               472 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               473 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               475 virt/kvm/kvm_main.c 	young = kvm_age_hva(kvm, start, end);
kvm               477 virt/kvm/kvm_main.c 		kvm_flush_remote_tlbs(kvm);
kvm               479 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               480 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               490 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               493 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               494 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               508 virt/kvm/kvm_main.c 	young = kvm_age_hva(kvm, start, end);
kvm               509 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               510 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               519 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               522 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               523 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm               524 virt/kvm/kvm_main.c 	young = kvm_test_age_hva(kvm, address);
kvm               525 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm               526 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               534 virt/kvm/kvm_main.c 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
kvm               537 virt/kvm/kvm_main.c 	idx = srcu_read_lock(&kvm->srcu);
kvm               538 virt/kvm/kvm_main.c 	kvm_arch_flush_shadow_all(kvm);
kvm               539 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, idx);
kvm               553 virt/kvm/kvm_main.c static int kvm_init_mmu_notifier(struct kvm *kvm)
kvm               555 virt/kvm/kvm_main.c 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
kvm               556 virt/kvm/kvm_main.c 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
kvm               561 virt/kvm/kvm_main.c static int kvm_init_mmu_notifier(struct kvm *kvm)
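
The notifier bookkeeping above (mmu_notifier_count held nonzero across an invalidation range, mmu_notifier_seq bumped on every change) exists so arch fault handlers can detect races with invalidation. A sketch of the consumer side, with mmu_seq as a hypothetical local; architectures typically wrap this check in a mmu_notifier_retry() helper, and a real handler would also release the pfn before retrying:

        unsigned long mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();                              /* read seq before the pfn lookup */

        pfn = gfn_to_pfn(kvm, gfn);             /* may sleep, may race */

        spin_lock(&kvm->mmu_lock);
        if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq) {
                spin_unlock(&kvm->mmu_lock);
                goto retry;                     /* invalidation ran: discard pfn */
        }
        /* ... safe to install the mapping for pfn ... */
        spin_unlock(&kvm->mmu_lock);
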
kvm               595 virt/kvm/kvm_main.c static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
kvm               601 virt/kvm/kvm_main.c 	kvm_arch_free_memslot(kvm, free, dont);
kvm               606 virt/kvm/kvm_main.c static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
kvm               614 virt/kvm/kvm_main.c 		kvm_free_memslot(kvm, memslot, NULL);
kvm               619 virt/kvm/kvm_main.c static void kvm_destroy_vm_debugfs(struct kvm *kvm)
kvm               623 virt/kvm/kvm_main.c 	if (!kvm->debugfs_dentry)
kvm               626 virt/kvm/kvm_main.c 	debugfs_remove_recursive(kvm->debugfs_dentry);
kvm               628 virt/kvm/kvm_main.c 	if (kvm->debugfs_stat_data) {
kvm               630 virt/kvm/kvm_main.c 			kfree(kvm->debugfs_stat_data[i]);
kvm               631 virt/kvm/kvm_main.c 		kfree(kvm->debugfs_stat_data);
kvm               635 virt/kvm/kvm_main.c static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
kvm               645 virt/kvm/kvm_main.c 	kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir);
kvm               647 virt/kvm/kvm_main.c 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
kvm               648 virt/kvm/kvm_main.c 					 sizeof(*kvm->debugfs_stat_data),
kvm               650 virt/kvm/kvm_main.c 	if (!kvm->debugfs_stat_data)
kvm               658 virt/kvm/kvm_main.c 		stat_data->kvm = kvm;
kvm               661 virt/kvm/kvm_main.c 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
kvm               662 virt/kvm/kvm_main.c 		debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
kvm               672 virt/kvm/kvm_main.c int __weak kvm_arch_post_init_vm(struct kvm *kvm)
kvm               681 virt/kvm/kvm_main.c void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
kvm               685 virt/kvm/kvm_main.c static struct kvm *kvm_create_vm(unsigned long type)
kvm               687 virt/kvm/kvm_main.c 	struct kvm *kvm = kvm_arch_alloc_vm();
kvm               691 virt/kvm/kvm_main.c 	if (!kvm)
kvm               694 virt/kvm/kvm_main.c 	spin_lock_init(&kvm->mmu_lock);
kvm               696 virt/kvm/kvm_main.c 	kvm->mm = current->mm;
kvm               697 virt/kvm/kvm_main.c 	kvm_eventfd_init(kvm);
kvm               698 virt/kvm/kvm_main.c 	mutex_init(&kvm->lock);
kvm               699 virt/kvm/kvm_main.c 	mutex_init(&kvm->irq_lock);
kvm               700 virt/kvm/kvm_main.c 	mutex_init(&kvm->slots_lock);
kvm               701 virt/kvm/kvm_main.c 	INIT_LIST_HEAD(&kvm->devices);
kvm               705 virt/kvm/kvm_main.c 	if (init_srcu_struct(&kvm->srcu))
kvm               707 virt/kvm/kvm_main.c 	if (init_srcu_struct(&kvm->irq_srcu))
kvm               710 virt/kvm/kvm_main.c 	refcount_set(&kvm->users_count, 1);
kvm               718 virt/kvm/kvm_main.c 		rcu_assign_pointer(kvm->memslots[i], slots);
kvm               722 virt/kvm/kvm_main.c 		rcu_assign_pointer(kvm->buses[i],
kvm               724 virt/kvm/kvm_main.c 		if (!kvm->buses[i])
kvm               728 virt/kvm/kvm_main.c 	r = kvm_arch_init_vm(kvm, type);
kvm               737 virt/kvm/kvm_main.c 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
kvm               740 virt/kvm/kvm_main.c 	r = kvm_init_mmu_notifier(kvm);
kvm               744 virt/kvm/kvm_main.c 	r = kvm_arch_post_init_vm(kvm);
kvm               749 virt/kvm/kvm_main.c 	list_add(&kvm->vm_list, &vm_list);
kvm               754 virt/kvm/kvm_main.c 	return kvm;
kvm               758 virt/kvm/kvm_main.c 	if (kvm->mmu_notifier.ops)
kvm               759 virt/kvm/kvm_main.c 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
kvm               764 virt/kvm/kvm_main.c 	kvm_arch_destroy_vm(kvm);
kvm               766 virt/kvm/kvm_main.c 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
kvm               768 virt/kvm/kvm_main.c 		kfree(kvm_get_bus(kvm, i));
kvm               770 virt/kvm/kvm_main.c 		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
kvm               771 virt/kvm/kvm_main.c 	cleanup_srcu_struct(&kvm->irq_srcu);
kvm               773 virt/kvm/kvm_main.c 	cleanup_srcu_struct(&kvm->srcu);
kvm               775 virt/kvm/kvm_main.c 	kvm_arch_free_vm(kvm);
kvm               780 virt/kvm/kvm_main.c static void kvm_destroy_devices(struct kvm *kvm)
kvm               789 virt/kvm/kvm_main.c 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
kvm               795 virt/kvm/kvm_main.c static void kvm_destroy_vm(struct kvm *kvm)
kvm               798 virt/kvm/kvm_main.c 	struct mm_struct *mm = kvm->mm;
kvm               800 virt/kvm/kvm_main.c 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
kvm               801 virt/kvm/kvm_main.c 	kvm_destroy_vm_debugfs(kvm);
kvm               802 virt/kvm/kvm_main.c 	kvm_arch_sync_events(kvm);
kvm               804 virt/kvm/kvm_main.c 	list_del(&kvm->vm_list);
kvm               806 virt/kvm/kvm_main.c 	kvm_arch_pre_destroy_vm(kvm);
kvm               808 virt/kvm/kvm_main.c 	kvm_free_irq_routing(kvm);
kvm               810 virt/kvm/kvm_main.c 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
kvm               814 virt/kvm/kvm_main.c 		kvm->buses[i] = NULL;
kvm               816 virt/kvm/kvm_main.c 	kvm_coalesced_mmio_free(kvm);
kvm               818 virt/kvm/kvm_main.c 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
kvm               820 virt/kvm/kvm_main.c 	kvm_arch_flush_shadow_all(kvm);
kvm               822 virt/kvm/kvm_main.c 	kvm_arch_destroy_vm(kvm);
kvm               823 virt/kvm/kvm_main.c 	kvm_destroy_devices(kvm);
kvm               825 virt/kvm/kvm_main.c 		kvm_free_memslots(kvm, __kvm_memslots(kvm, i));
kvm               826 virt/kvm/kvm_main.c 	cleanup_srcu_struct(&kvm->irq_srcu);
kvm               827 virt/kvm/kvm_main.c 	cleanup_srcu_struct(&kvm->srcu);
kvm               828 virt/kvm/kvm_main.c 	kvm_arch_free_vm(kvm);
kvm               834 virt/kvm/kvm_main.c void kvm_get_kvm(struct kvm *kvm)
kvm               836 virt/kvm/kvm_main.c 	refcount_inc(&kvm->users_count);
kvm               840 virt/kvm/kvm_main.c void kvm_put_kvm(struct kvm *kvm)
kvm               842 virt/kvm/kvm_main.c 	if (refcount_dec_and_test(&kvm->users_count))
kvm               843 virt/kvm/kvm_main.c 		kvm_destroy_vm(kvm);
kvm               850 virt/kvm/kvm_main.c 	struct kvm *kvm = filp->private_data;
kvm               852 virt/kvm/kvm_main.c 	kvm_irqfd_release(kvm);
kvm               854 virt/kvm/kvm_main.c 	kvm_put_kvm(kvm);
kvm               947 virt/kvm/kvm_main.c static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
kvm               950 virt/kvm/kvm_main.c 	struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
kvm               956 virt/kvm/kvm_main.c 	rcu_assign_pointer(kvm->memslots[as_id], slots);
kvm               957 virt/kvm/kvm_main.c 	synchronize_srcu_expedited(&kvm->srcu);
kvm               976 virt/kvm/kvm_main.c 	kvm_arch_memslots_updated(kvm, gen);
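
install_new_memslots() follows the same SRCU discipline as the routing and bus updates: publish the new array, wait out readers of kvm->srcu, then hand the retired array back to the caller to free. The generation is bumped across the switch so cached translations (gfn_to_hva caches, the MMIO generation) can cheaply detect that the layout changed. Heavily simplified, with the in-progress flag handling omitted:

        rcu_assign_pointer(kvm->memslots[as_id], slots);
        synchronize_srcu_expedited(&kvm->srcu); /* all readers off old_memslots */
        kvm_arch_memslots_updated(kvm, gen);    /* arch reacts to the new gen */
        /* caller frees old_memslots */
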
kvm               991 virt/kvm/kvm_main.c int __kvm_set_memory_region(struct kvm *kvm,
kvm              1027 virt/kvm/kvm_main.c 	slot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
kvm              1071 virt/kvm/kvm_main.c 		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
kvm              1088 virt/kvm/kvm_main.c 		if (kvm_arch_create_memslot(kvm, &new, npages))
kvm              1101 virt/kvm/kvm_main.c 	memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
kvm              1107 virt/kvm/kvm_main.c 		old_memslots = install_new_memslots(kvm, as_id, slots);
kvm              1116 virt/kvm/kvm_main.c 		kvm_arch_flush_shadow_memslot(kvm, slot);
kvm              1126 virt/kvm/kvm_main.c 	r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
kvm              1137 virt/kvm/kvm_main.c 	old_memslots = install_new_memslots(kvm, as_id, slots);
kvm              1139 virt/kvm/kvm_main.c 	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
kvm              1141 virt/kvm/kvm_main.c 	kvm_free_memslot(kvm, &old, &new);
kvm              1148 virt/kvm/kvm_main.c 	kvm_free_memslot(kvm, &new, &old);
kvm              1154 virt/kvm/kvm_main.c int kvm_set_memory_region(struct kvm *kvm,
kvm              1159 virt/kvm/kvm_main.c 	mutex_lock(&kvm->slots_lock);
kvm              1160 virt/kvm/kvm_main.c 	r = __kvm_set_memory_region(kvm, mem);
kvm              1161 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->slots_lock);
kvm              1166 virt/kvm/kvm_main.c static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
kvm              1172 virt/kvm/kvm_main.c 	return kvm_set_memory_region(kvm, mem);
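
The ioctl behind kvm_vm_ioctl_set_memory_region() is KVM_SET_USER_MEMORY_REGION. A minimal userspace sketch, where mem is backing memory mapped by the VMM and the guest physical address is an arbitrary illustrative choice:

        #include <err.h>
        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        struct kvm_userspace_memory_region region = {
                .slot            = 0,
                .flags           = 0,                   /* or KVM_MEM_LOG_DIRTY_PAGES */
                .guest_phys_addr = 0x100000,            /* hypothetical GPA */
                .memory_size     = mem_size,            /* multiple of the page size */
                .userspace_addr  = (uint64_t)(uintptr_t)mem,
        };

        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
                err(1, "KVM_SET_USER_MEMORY_REGION");
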
kvm              1175 virt/kvm/kvm_main.c int kvm_get_dirty_log(struct kvm *kvm,
kvm              1189 virt/kvm/kvm_main.c 	slots = __kvm_memslots(kvm, as_id);
kvm              1231 virt/kvm/kvm_main.c int kvm_get_dirty_log_protect(struct kvm *kvm,
kvm              1246 virt/kvm/kvm_main.c 	slots = __kvm_memslots(kvm, as_id);
kvm              1255 virt/kvm/kvm_main.c 	if (kvm->manual_dirty_log_protect) {
kvm              1269 virt/kvm/kvm_main.c 		spin_lock(&kvm->mmu_lock);
kvm              1282 virt/kvm/kvm_main.c 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
kvm              1285 virt/kvm/kvm_main.c 		spin_unlock(&kvm->mmu_lock);
kvm              1301 virt/kvm/kvm_main.c int kvm_clear_dirty_log_protect(struct kvm *kvm,
kvm              1320 virt/kvm/kvm_main.c 	slots = __kvm_memslots(kvm, as_id);
kvm              1339 virt/kvm/kvm_main.c 	spin_lock(&kvm->mmu_lock);
kvm              1358 virt/kvm/kvm_main.c 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
kvm              1362 virt/kvm/kvm_main.c 	spin_unlock(&kvm->mmu_lock);
kvm              1380 virt/kvm/kvm_main.c struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
kvm              1382 virt/kvm/kvm_main.c 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
kvm              1391 virt/kvm/kvm_main.c bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
kvm              1393 virt/kvm/kvm_main.c 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
kvm              1460 virt/kvm/kvm_main.c unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
kvm              1462 virt/kvm/kvm_main.c 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
kvm              1491 virt/kvm/kvm_main.c unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
kvm              1493 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
kvm              1734 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
kvm              1737 virt/kvm/kvm_main.c 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
kvm              1754 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
kvm              1756 virt/kvm/kvm_main.c 	return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn);
kvm              1766 virt/kvm/kvm_main.c kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
kvm              1768 virt/kvm/kvm_main.c 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
kvm              1808 virt/kvm/kvm_main.c struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
kvm              1812 virt/kvm/kvm_main.c 	pfn = gfn_to_pfn(kvm, gfn);
kvm              1901 virt/kvm/kvm_main.c 	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
kvm              1952 virt/kvm/kvm_main.c 	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
kvm              2052 virt/kvm/kvm_main.c int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
kvm              2055 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
kvm              2070 virt/kvm/kvm_main.c int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
kvm              2078 virt/kvm/kvm_main.c 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
kvm              2127 virt/kvm/kvm_main.c int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
kvm              2131 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
kvm              2165 virt/kvm/kvm_main.c int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
kvm              2168 virt/kvm/kvm_main.c 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
kvm              2183 virt/kvm/kvm_main.c int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
kvm              2192 virt/kvm/kvm_main.c 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
kvm              2263 virt/kvm/kvm_main.c int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm              2266 virt/kvm/kvm_main.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              2271 virt/kvm/kvm_main.c int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm              2275 virt/kvm/kvm_main.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              2288 virt/kvm/kvm_main.c 		return kvm_write_guest(kvm, gpa, data, len);
kvm              2299 virt/kvm/kvm_main.c int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm              2302 virt/kvm/kvm_main.c 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
kvm              2306 virt/kvm/kvm_main.c int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
kvm              2309 virt/kvm/kvm_main.c 	struct kvm_memslots *slots = kvm_memslots(kvm);
kvm              2321 virt/kvm/kvm_main.c 		return kvm_read_guest(kvm, ghc->gpa, data, len);
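
kvm_gfn_to_hva_cache_init() plus the *_cached accessors above let frequently-touched guest structures (steal time, async-PF words, and the like) skip the per-access memslot lookup; the cache revalidates itself when the memslot generation changes. Usage sketch, with gpa and val as hypothetical stand-ins:

        struct gfn_to_hva_cache ghc;
        u64 val;

        /* once, when the guest registers the address: */
        if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
                return -EFAULT;

        /* hot path: no memslot walk per access */
        val = 0;                                /* payload to publish */
        kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
        kvm_read_guest_cached(kvm, &ghc, &val, sizeof(val));
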
kvm              2331 virt/kvm/kvm_main.c int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
kvm              2335 virt/kvm/kvm_main.c 	return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
kvm              2339 virt/kvm/kvm_main.c int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
kvm              2347 virt/kvm/kvm_main.c 		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
kvm              2368 virt/kvm/kvm_main.c void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
kvm              2372 virt/kvm/kvm_main.c 	memslot = gfn_to_memslot(kvm, gfn);
kvm              2449 virt/kvm/kvm_main.c 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm              2462 virt/kvm/kvm_main.c 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm              2658 virt/kvm/kvm_main.c 	struct kvm *kvm = me->kvm;
kvm              2660 virt/kvm/kvm_main.c 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
kvm              2675 virt/kvm/kvm_main.c 		kvm_for_each_vcpu(i, vcpu, kvm) {
kvm              2695 virt/kvm/kvm_main.c 				kvm->last_boosted_vcpu = i;
kvm              2724 virt/kvm/kvm_main.c 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
kvm              2748 virt/kvm/kvm_main.c 	kvm_put_kvm(vcpu->kvm);
kvm              2781 virt/kvm/kvm_main.c 						  vcpu->kvm->debugfs_dentry);
kvm              2790 virt/kvm/kvm_main.c static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
kvm              2798 virt/kvm/kvm_main.c 	mutex_lock(&kvm->lock);
kvm              2799 virt/kvm/kvm_main.c 	if (kvm->created_vcpus == KVM_MAX_VCPUS) {
kvm              2800 virt/kvm/kvm_main.c 		mutex_unlock(&kvm->lock);
kvm              2804 virt/kvm/kvm_main.c 	kvm->created_vcpus++;
kvm              2805 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->lock);
kvm              2807 virt/kvm/kvm_main.c 	vcpu = kvm_arch_vcpu_create(kvm, id);
kvm              2821 virt/kvm/kvm_main.c 	mutex_lock(&kvm->lock);
kvm              2822 virt/kvm/kvm_main.c 	if (kvm_get_vcpu_by_id(kvm, id)) {
kvm              2827 virt/kvm/kvm_main.c 	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
kvm              2830 virt/kvm/kvm_main.c 	kvm_get_kvm(kvm);
kvm              2833 virt/kvm/kvm_main.c 		kvm_put_kvm(kvm);
kvm              2837 virt/kvm/kvm_main.c 	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
kvm              2844 virt/kvm/kvm_main.c 	atomic_inc(&kvm->online_vcpus);
kvm              2846 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->lock);
kvm              2851 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->lock);
kvm              2856 virt/kvm/kvm_main.c 	mutex_lock(&kvm->lock);
kvm              2857 virt/kvm/kvm_main.c 	kvm->created_vcpus--;
kvm              2858 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->lock);
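
kvm_vm_ioctl_create_vcpu() reserves its slot by bumping created_vcpus under kvm->lock before calling into the potentially sleeping arch constructor, then reacquires the lock to publish the vcpu; online_vcpus is incremented only after the vcpu is fully visible, so kvm_for_each_vcpu() never sees a half-built entry. The reservation step in outline:

        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus == KVM_MAX_VCPUS) {
                mutex_unlock(&kvm->lock);
                return -EINVAL;
        }
        kvm->created_vcpus++;           /* reserve before the sleeping arch call */
        mutex_unlock(&kvm->lock);

        vcpu = kvm_arch_vcpu_create(kvm, id);   /* runs without kvm->lock held */
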
kvm              2882 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
kvm              3089 virt/kvm/kvm_main.c 	if (vcpu->kvm->mm != current->mm)
kvm              3154 virt/kvm/kvm_main.c 	if (dev->kvm->mm != current->mm)
kvm              3175 virt/kvm/kvm_main.c 	struct kvm *kvm = dev->kvm;
kvm              3178 virt/kvm/kvm_main.c 		mutex_lock(&kvm->lock);
kvm              3181 virt/kvm/kvm_main.c 		mutex_unlock(&kvm->lock);
kvm              3184 virt/kvm/kvm_main.c 	kvm_put_kvm(kvm);
kvm              3228 virt/kvm/kvm_main.c static int kvm_ioctl_create_device(struct kvm *kvm,
kvm              3253 virt/kvm/kvm_main.c 	dev->kvm = kvm;
kvm              3255 virt/kvm/kvm_main.c 	mutex_lock(&kvm->lock);
kvm              3258 virt/kvm/kvm_main.c 		mutex_unlock(&kvm->lock);
kvm              3262 virt/kvm/kvm_main.c 	list_add(&dev->vm_node, &kvm->devices);
kvm              3263 virt/kvm/kvm_main.c 	mutex_unlock(&kvm->lock);
kvm              3268 virt/kvm/kvm_main.c 	kvm_get_kvm(kvm);
kvm              3271 virt/kvm/kvm_main.c 		kvm_put_kvm(kvm);
kvm              3272 virt/kvm/kvm_main.c 		mutex_lock(&kvm->lock);
kvm              3274 virt/kvm/kvm_main.c 		mutex_unlock(&kvm->lock);
kvm              3283 virt/kvm/kvm_main.c static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
kvm              3323 virt/kvm/kvm_main.c 	return kvm_vm_ioctl_check_extension(kvm, arg);
kvm              3326 virt/kvm/kvm_main.c int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
kvm              3332 virt/kvm/kvm_main.c static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
kvm              3340 virt/kvm/kvm_main.c 		kvm->manual_dirty_log_protect = cap->args[0];
kvm              3344 virt/kvm/kvm_main.c 		return kvm_vm_ioctl_enable_cap(kvm, cap);
kvm              3351 virt/kvm/kvm_main.c 	struct kvm *kvm = filp->private_data;
kvm              3355 virt/kvm/kvm_main.c 	if (kvm->mm != current->mm)
kvm              3359 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
kvm              3367 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
kvm              3378 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
kvm              3387 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
kvm              3397 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
kvm              3408 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
kvm              3417 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
kvm              3427 virt/kvm/kvm_main.c 		r = kvm_irqfd(kvm, &data);
kvm              3436 virt/kvm/kvm_main.c 		r = kvm_ioeventfd(kvm, &data);
kvm              3446 virt/kvm/kvm_main.c 		r = kvm_send_userspace_msi(kvm, &msi);
kvm              3459 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
kvm              3484 virt/kvm/kvm_main.c 		if (!kvm_arch_can_set_irq_routing(kvm))
kvm              3502 virt/kvm/kvm_main.c 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
kvm              3516 virt/kvm/kvm_main.c 		r = kvm_ioctl_create_device(kvm, &cd);
kvm              3528 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
kvm              3550 virt/kvm/kvm_main.c 	struct kvm *kvm = filp->private_data;
kvm              3553 virt/kvm/kvm_main.c 	if (kvm->mm != current->mm)
kvm              3568 virt/kvm/kvm_main.c 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
kvm              3588 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              3591 virt/kvm/kvm_main.c 	kvm = kvm_create_vm(type);
kvm              3592 virt/kvm/kvm_main.c 	if (IS_ERR(kvm))
kvm              3593 virt/kvm/kvm_main.c 		return PTR_ERR(kvm);
kvm              3595 virt/kvm/kvm_main.c 	r = kvm_coalesced_mmio_init(kvm);
kvm              3603 virt/kvm/kvm_main.c 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
kvm              3616 virt/kvm/kvm_main.c 	if (kvm_create_vm_debugfs(kvm, r) < 0) {
kvm              3621 virt/kvm/kvm_main.c 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
kvm              3627 virt/kvm/kvm_main.c 	kvm_put_kvm(kvm);
kvm              3887 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
kvm              3907 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
kvm              3958 virt/kvm/kvm_main.c 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
kvm              3966 virt/kvm/kvm_main.c int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
kvm              3973 virt/kvm/kvm_main.c 	bus = kvm_get_bus(kvm, bus_idx);
kvm              4001 virt/kvm/kvm_main.c 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
kvm              4002 virt/kvm/kvm_main.c 	synchronize_srcu_expedited(&kvm->srcu);
kvm              4009 virt/kvm/kvm_main.c void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
kvm              4015 virt/kvm/kvm_main.c 	bus = kvm_get_bus(kvm, bus_idx);
kvm              4040 virt/kvm/kvm_main.c 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
kvm              4041 virt/kvm/kvm_main.c 	synchronize_srcu_expedited(&kvm->srcu);
kvm              4046 virt/kvm/kvm_main.c struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
kvm              4053 virt/kvm/kvm_main.c 	srcu_idx = srcu_read_lock(&kvm->srcu);
kvm              4055 virt/kvm/kvm_main.c 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
kvm              4066 virt/kvm/kvm_main.c 	srcu_read_unlock(&kvm->srcu, srcu_idx);
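
Both kvm_io_bus_register_dev() and kvm_io_bus_unregister_dev() update the bus copy-on-write: build a modified duplicate, publish it with rcu_assign_pointer(), and free the original only after synchronize_srcu_expedited() on kvm->srcu, so lockless readers in the MMIO dispatch path never see a bus in mid-edit. A registration skeleton, with the sorted range insertion elided:

        struct kvm_io_bus *bus = kvm_get_bus(kvm, bus_idx), *new_bus;

        new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
                          GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
        memcpy(new_bus, bus, struct_size(bus, range, bus->dev_count));
        /* ... insert the new kvm_io_device range, keeping the array sorted ... */

        rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
        synchronize_srcu_expedited(&kvm->srcu); /* old readers drained */
        kfree(bus);
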
kvm              4084 virt/kvm/kvm_main.c 	if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
kvm              4090 virt/kvm/kvm_main.c 		kvm_put_kvm(stat_data->kvm);
kvm              4103 virt/kvm/kvm_main.c 	kvm_put_kvm(stat_data->kvm);
kvm              4112 virt/kvm/kvm_main.c 	*val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
kvm              4124 virt/kvm/kvm_main.c 	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
kvm              4153 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
kvm              4168 virt/kvm/kvm_main.c 	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
kvm              4198 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              4204 virt/kvm/kvm_main.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              4205 virt/kvm/kvm_main.c 		stat_tmp.kvm = kvm;
kvm              4216 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              4223 virt/kvm/kvm_main.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              4224 virt/kvm/kvm_main.c 		stat_tmp.kvm = kvm;
kvm              4237 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              4243 virt/kvm/kvm_main.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              4244 virt/kvm/kvm_main.c 		stat_tmp.kvm = kvm;
kvm              4255 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              4262 virt/kvm/kvm_main.c 	list_for_each_entry(kvm, &vm_list, vm_list) {
kvm              4263 virt/kvm/kvm_main.c 		stat_tmp.kvm = kvm;
kvm              4279 virt/kvm/kvm_main.c static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
kvm              4284 virt/kvm/kvm_main.c 	if (!kvm_dev.this_device || !kvm)
kvm              4307 virt/kvm/kvm_main.c 		kvm->userspace_pid = task_pid_nr(current);
kvm              4311 virt/kvm/kvm_main.c 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
kvm              4313 virt/kvm/kvm_main.c 	if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
kvm              4317 virt/kvm/kvm_main.c 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
kvm              4523 virt/kvm/kvm_main.c 	struct kvm *kvm;
kvm              4538 virt/kvm/kvm_main.c 	struct kvm *kvm = init_context->kvm;
kvm              4570 virt/kvm/kvm_main.c 		err = thread_fn(kvm, data);
kvm              4575 virt/kvm/kvm_main.c int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
kvm              4583 virt/kvm/kvm_main.c 	init_context.kvm = kvm;
kvm                80 virt/kvm/vfio.c static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
kvm                82 virt/kvm/vfio.c 	void (*fn)(struct vfio_group *, struct kvm *);
kvm                88 virt/kvm/vfio.c 	fn(group, kvm);
kvm               137 virt/kvm/vfio.c static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
kvm               145 virt/kvm/vfio.c 	kvm_spapr_tce_release_iommu_group(kvm, grp);
kvm               176 virt/kvm/vfio.c 			kvm_arch_register_noncoherent_dma(dev->kvm);
kvm               178 virt/kvm/vfio.c 			kvm_arch_unregister_noncoherent_dma(dev->kvm);
kvm               229 virt/kvm/vfio.c 		kvm_arch_start_assignment(dev->kvm);
kvm               233 virt/kvm/vfio.c 		kvm_vfio_group_set_kvm(vfio_group, dev->kvm);
kvm               257 virt/kvm/vfio.c 			kvm_arch_end_assignment(dev->kvm);
kvm               259 virt/kvm/vfio.c 			kvm_spapr_tce_release_vfio_group(dev->kvm,
kvm               314 virt/kvm/vfio.c 			ret = kvm_spapr_tce_attach_iommu_group(dev->kvm,
kvm               370 virt/kvm/vfio.c 		kvm_spapr_tce_release_vfio_group(dev->kvm, kvg->vfio_group);
kvm               376 virt/kvm/vfio.c 		kvm_arch_end_assignment(dev->kvm);
kvm               401 virt/kvm/vfio.c 	list_for_each_entry(tmp, &dev->kvm->devices, vm_node)