nr_zapped 2818 arch/x86/kvm/mmu.c int *nr_zapped)
nr_zapped 2824 arch/x86/kvm/mmu.c *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
nr_zapped 2829 arch/x86/kvm/mmu.c list_unstable = *nr_zapped;
nr_zapped 2838 arch/x86/kvm/mmu.c (*nr_zapped)++;
nr_zapped 2863 arch/x86/kvm/mmu.c int nr_zapped;
nr_zapped 2865 arch/x86/kvm/mmu.c __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
nr_zapped 2866 arch/x86/kvm/mmu.c return nr_zapped;
nr_zapped 5832 arch/x86/kvm/mmu.c int nr_zapped, batch = 0;
nr_zapped 5866 arch/x86/kvm/mmu.c &kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
nr_zapped 5867 arch/x86/kvm/mmu.c batch += nr_zapped;
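
Read together, these hits show an out-parameter counting pattern: __kvm_mmu_prepare_zap_page() reports through *nr_zapped how many shadow pages it put on invalid_list (lines 2818-2838, seeded by mmu_zap_unsync_children() and bumped for the page itself), a thin wrapper declares a local count, calls the inner function, and returns the count (lines 2863-2866), and the caller working on kvm->arch.zapped_obsolete_pages accumulates the counts into batch (lines 5832-5867). Below is a minimal, self-contained sketch of that pattern only; zap_one() and prepare_zap() are simplified stand-ins invented for illustration, not the kernel's actual logic.

/*
 * Sketch of the nr_zapped out-parameter pattern from the listing above.
 * An inner function reports a count via *nr_zapped, a wrapper returns
 * the count for callers that do not need the pointer, and a batching
 * caller sums the per-call counts. Build with: cc -o zap zap.c
 */
#include <stdio.h>
#include <stdbool.h>

/* Stand-in for __kvm_mmu_prepare_zap_page(): zaps one page, counts it
 * and any children dragged along, and returns whether anything was
 * zapped (the analogue of the kernel's list_unstable result). */
static bool zap_one(int page, int *nr_zapped)
{
	*nr_zapped = 0;

	(*nr_zapped)++;          /* the page itself */
	if (page % 2 == 0)
		(*nr_zapped)++;  /* pretend even pages drag one child */

	return *nr_zapped > 0;
}

/* Stand-in for the wrapper at lines 2863-2866: callers that only want
 * the count get it without supplying a pointer of their own. */
static int prepare_zap(int page)
{
	int nr_zapped;

	zap_one(page, &nr_zapped);
	return nr_zapped;
}

int main(void)
{
	int nr_zapped, batch = 0;

	/* Batching caller, mirroring lines 5832-5867: accumulate the
	 * per-call counts only when the zap actually did something. */
	for (int page = 0; page < 8; page++) {
		if (zap_one(page, &nr_zapped))
			batch += nr_zapped;
	}

	printf("zapped %d pages in this batch\n", batch);
	printf("prepare_zap(3) zapped %d page(s)\n", prepare_zap(3));
	return 0;
}

The design point the pattern buys: one zap primitive serves both the caller that needs exact counts for batching and the caller that only cares whether a zap happened, without duplicating the zap logic.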