invalid_list 2335 arch/x86/kvm/mmu.c struct list_head *invalid_list);
invalid_list 2337 arch/x86/kvm/mmu.c struct list_head *invalid_list);
invalid_list 2357 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2361 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
invalid_list 2369 arch/x86/kvm/mmu.c struct list_head *invalid_list,
invalid_list 2372 arch/x86/kvm/mmu.c if (!remote_flush && list_empty(invalid_list))
invalid_list 2375 arch/x86/kvm/mmu.c if (!list_empty(invalid_list))
invalid_list 2376 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, invalid_list);
invalid_list 2383 arch/x86/kvm/mmu.c struct list_head *invalid_list,
invalid_list 2386 arch/x86/kvm/mmu.c if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
invalid_list 2407 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2410 arch/x86/kvm/mmu.c return __kvm_sync_page(vcpu, sp, invalid_list);
invalid_list 2415 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2425 arch/x86/kvm/mmu.c ret |= kvm_sync_page(vcpu, s, invalid_list);
invalid_list 2510 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 2525 arch/x86/kvm/mmu.c flush |= kvm_sync_page(vcpu, sp, &invalid_list);
invalid_list 2529 arch/x86/kvm/mmu.c kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
invalid_list 2535 arch/x86/kvm/mmu.c kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
invalid_list 2563 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 2593 arch/x86/kvm/mmu.c if (!__kvm_sync_page(vcpu, sp, &invalid_list))
invalid_list 2596 arch/x86/kvm/mmu.c WARN_ON(!list_empty(&invalid_list));
invalid_list 2628 arch/x86/kvm/mmu.c flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
invalid_list 2633 arch/x86/kvm/mmu.c kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
invalid_list 2793 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2806 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
invalid_list 2817 arch/x86/kvm/mmu.c struct list_head *invalid_list,
invalid_list 2824 arch/x86/kvm/mmu.c *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
invalid_list 2839 arch/x86/kvm/mmu.c list_move(&sp->link, invalid_list);
invalid_list 2861 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2865 arch/x86/kvm/mmu.c __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
invalid_list 2870 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2874 arch/x86/kvm/mmu.c if (list_empty(invalid_list))
invalid_list 2888 arch/x86/kvm/mmu.c list_for_each_entry_safe(sp, nsp, invalid_list, link) {
invalid_list 2895 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 2904 arch/x86/kvm/mmu.c return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
invalid_list 2913 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 2920 arch/x86/kvm/mmu.c if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
invalid_list 2923 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &invalid_list);
invalid_list 2935 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 2945 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
invalid_list 2947 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &invalid_list);
invalid_list 3701 arch/x86/kvm/mmu.c struct list_head *invalid_list)
invalid_list 3711 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
invalid_list 3721 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 3742 arch/x86/kvm/mmu.c &invalid_list);
invalid_list 3748 arch/x86/kvm/mmu.c &invalid_list);
invalid_list 3754 arch/x86/kvm/mmu.c &invalid_list);
invalid_list 3760 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
invalid_list 5437 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 5470 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
invalid_list 5494 arch/x86/kvm/mmu.c kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
invalid_list 5517 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 5523 arch/x86/kvm/mmu.c if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
invalid_list 5528 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
invalid_list 6124 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 6132 arch/x86/kvm/mmu.c if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
invalid_list 6138 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &invalid_list);
invalid_list 6178 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 6207 arch/x86/kvm/mmu.c if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
invalid_list 6209 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &invalid_list);
invalid_list 6428 arch/x86/kvm/mmu.c LIST_HEAD(invalid_list);
invalid_list 6446 arch/x86/kvm/mmu.c kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
invalid_list 6450 arch/x86/kvm/mmu.c kvm_mmu_commit_zap_page(kvm, &invalid_list);
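For orientation, the hits above all follow one batching pattern: a caller declares a local invalid_list, queues shadow pages onto it with kvm_mmu_prepare_zap_page() (or its __ and *_oldest_* variants), and then frees the whole batch with kvm_mmu_commit_zap_page(). Below is a minimal sketch of that pattern, not an excerpt from mmu.c: the wrapper name zap_one_page_example() is hypothetical, and the mmu_lock locking is an assumption based on how these call sites are normally invoked.

/*
 * Illustrative sketch only; mirrors the prepare/commit pairing seen at
 * e.g. lines 2935-2947 above. The wrapper name is hypothetical.
 */
static void zap_one_page_example(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);	/* local batch of pages to be zapped */

	spin_lock(&kvm->mmu_lock);	/* assumed: callers hold mmu_lock */

	/* Unlink sp from the MMU structures and queue it on invalid_list. */
	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

	/* Flush TLBs as needed, then free everything queued on the list. */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	spin_unlock(&kvm->mmu_lock);
}

Splitting the work into prepare and commit lets a caller queue many pages under the lock and pay for the remote TLB flush only once, which is why nearly every LIST_HEAD(invalid_list) hit above is paired with a single kvm_mmu_commit_zap_page() call.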