rmap_head 1371 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head)
rmap_head 1376 arch/x86/kvm/mmu.c if (!rmap_head->val) {
rmap_head 1378 arch/x86/kvm/mmu.c rmap_head->val = (unsigned long)spte;
rmap_head 1379 arch/x86/kvm/mmu.c } else if (!(rmap_head->val & 1)) {
rmap_head 1382 arch/x86/kvm/mmu.c desc->sptes[0] = (u64 *)rmap_head->val;
rmap_head 1384 arch/x86/kvm/mmu.c rmap_head->val = (unsigned long)desc | 1;
rmap_head 1388 arch/x86/kvm/mmu.c desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
rmap_head 1405 arch/x86/kvm/mmu.c pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
rmap_head 1418 arch/x86/kvm/mmu.c rmap_head->val = (unsigned long)desc->sptes[0];
rmap_head 1423 arch/x86/kvm/mmu.c rmap_head->val = (unsigned long)desc->more | 1;
rmap_head 1427 arch/x86/kvm/mmu.c static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
rmap_head 1433 arch/x86/kvm/mmu.c if (!rmap_head->val) {
rmap_head 1436 arch/x86/kvm/mmu.c } else if (!(rmap_head->val & 1)) {
rmap_head 1438 arch/x86/kvm/mmu.c if ((u64 *)rmap_head->val != spte) {
rmap_head 1442 arch/x86/kvm/mmu.c rmap_head->val = 0;
rmap_head 1445 arch/x86/kvm/mmu.c desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
rmap_head 1450 arch/x86/kvm/mmu.c pte_list_desc_remove_entry(rmap_head,
rmap_head 1463 arch/x86/kvm/mmu.c static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
rmap_head 1466 arch/x86/kvm/mmu.c __pte_list_remove(sptep, rmap_head);
rmap_head 1500 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 1504 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
rmap_head 1505 arch/x86/kvm/mmu.c return pte_list_add(vcpu, spte, rmap_head);
rmap_head 1512 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 1516 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(kvm, gfn, sp);
rmap_head 1517 arch/x86/kvm/mmu.c __pte_list_remove(spte, rmap_head);
rmap_head 1537 arch/x86/kvm/mmu.c static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
rmap_head 1542 arch/x86/kvm/mmu.c if (!rmap_head->val)
rmap_head 1545 arch/x86/kvm/mmu.c if (!(rmap_head->val & 1)) {
rmap_head 1547 arch/x86/kvm/mmu.c sptep = (u64 *)rmap_head->val;
rmap_head 1551 arch/x86/kvm/mmu.c iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
rmap_head 1657 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head,
rmap_head 1664 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep)
rmap_head 1697 arch/x86/kvm/mmu.c static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
rmap_head 1703 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep)
rmap_head 1728 arch/x86/kvm/mmu.c static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
rmap_head 1734 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep)
rmap_head 1755 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 1758 arch/x86/kvm/mmu.c rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
rmap_head 1760 arch/x86/kvm/mmu.c __rmap_write_protect(kvm, rmap_head, false);
rmap_head 1781 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 1784 arch/x86/kvm/mmu.c rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
rmap_head 1786 arch/x86/kvm/mmu.c __rmap_clear_dirty(kvm, rmap_head);
rmap_head 1833 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 1838 arch/x86/kvm/mmu.c rmap_head = __gfn_to_rmap(gfn, i, slot);
rmap_head 1839 arch/x86/kvm/mmu.c write_protected |= __rmap_write_protect(kvm, rmap_head, true);
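The pte_list_add(), __pte_list_remove(), and rmap_get_first() usages above all revolve around one tagged-pointer encoding: a kvm_rmap_head holds 0 when the gfn maps no sptes, the spte pointer itself (bit 0 clear, since the pointer is word-aligned) when it maps exactly one, and a pte_list_desc pointer with bit 0 set when it maps several. The following stand-alone C sketch reproduces the add path under that encoding. It is illustrative only: the desc layout, the PTE_LIST_EXT value, and the use of calloc() are simplifications for this example, not KVM's actual implementation (the real mmu.c allocates descs from an MMU memory cache and returns an entry count).

/*
 * Minimal user-space sketch of the rmap head encoding seen in the
 * pte_list_add() snippets above (mmu.c lines 1376-1388). u64 is spelled
 * unsigned long long; allocation failure handling is omitted for brevity.
 */
#include <stdlib.h>

#define PTE_LIST_EXT 3	/* sptes per desc; a small value for the sketch */

struct pte_list_desc {
	unsigned long long *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;	/* next desc in the chain, or NULL */
};

struct kvm_rmap_head {
	unsigned long val;	/* 0: empty; bit 0 clear: one spte; bit 0 set: desc list */
};

static void pte_list_add(struct kvm_rmap_head *rmap_head, unsigned long long *spte)
{
	struct pte_list_desc *desc;
	int i;

	if (!rmap_head->val) {
		/* First entry: store the spte pointer directly, bit 0 stays clear. */
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		/* Second entry: promote the lone pointer into a desc list. */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (unsigned long long *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;	/* set the tag bit */
	} else {
		/* Already a list: mask off the tag bit, find a free slot. */
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT - 1] && desc->more)
			desc = desc->more;
		if (desc->sptes[PTE_LIST_EXT - 1]) {
			/* Last desc is full: chain a fresh one. */
			desc->more = calloc(1, sizeof(*desc));
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; i++)
			;
		desc->sptes[i] = spte;
	}
}

The single-spte fast path is the point of the design: the common case of one mapping per gfn costs one word per rmap head and no allocation, and the tag bit in the removal and iteration snippets above (rmap_head->val & 1, ~1ul) is how every reader tells the two cases apart.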
rmap_head 1853 arch/x86/kvm/mmu.c static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
rmap_head 1859 arch/x86/kvm/mmu.c while ((sptep = rmap_get_first(rmap_head, &iter))) {
rmap_head 1862 arch/x86/kvm/mmu.c pte_list_remove(rmap_head, sptep);
rmap_head 1869 arch/x86/kvm/mmu.c static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
rmap_head 1873 arch/x86/kvm/mmu.c return kvm_zap_rmapp(kvm, rmap_head);
rmap_head 1876 arch/x86/kvm/mmu.c static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
rmap_head 1891 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep) {
rmap_head 1898 arch/x86/kvm/mmu.c pte_list_remove(rmap_head, sptep);
rmap_head 1995 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head,
rmap_head 2040 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head,
rmap_head 2058 arch/x86/kvm/mmu.c static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
rmap_head 2066 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep)
rmap_head 2073 arch/x86/kvm/mmu.c static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
rmap_head 2080 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep)
rmap_head 2090 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head;
rmap_head 2095 arch/x86/kvm/mmu.c rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
rmap_head 2097 arch/x86/kvm/mmu.c kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
rmap_head 5687 arch/x86/kvm/mmu.c typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
rmap_head 5974 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head)
rmap_head 5976 arch/x86/kvm/mmu.c return __rmap_write_protect(kvm, rmap_head, false);
rmap_head 6013 arch/x86/kvm/mmu.c struct kvm_rmap_head *rmap_head)
rmap_head 6022 arch/x86/kvm/mmu.c for_each_rmap_spte(rmap_head, &iter, sptep) {
rmap_head 6036 arch/x86/kvm/mmu.c pte_list_remove(rmap_head, sptep);
rmap_head 129 arch/x86/kvm/mmu_audit.c struct kvm_rmap_head *rmap_head;
rmap_head 150 arch/x86/kvm/mmu_audit.c rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
rmap_head 151 arch/x86/kvm/mmu_audit.c if (!rmap_head->val) {
rmap_head 192 arch/x86/kvm/mmu_audit.c struct kvm_rmap_head *rmap_head;
rmap_head 203 arch/x86/kvm/mmu_audit.c rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);
rmap_head 205 arch/x86/kvm/mmu_audit.c for_each_rmap_spte(rmap_head, &iter, sptep) {
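The for_each_rmap_spte() walks that recur through the listing (kvm_age_rmapp(), kvm_test_age_rmapp(), __rmap_clear_dirty(), and the mmu_audit.c checks) decode the same tag bit on the read side, as the rmap_get_first() snippets at mmu.c lines 1542-1551 show. A companion sketch follows, reusing the definitions from the example above; rmap_iterator and rmap_get_next() here are local stand-ins for the kernel's iterator, and they assume each desc's sptes[] array is filled from index 0 without holes, in line with how the kernel packs entries on removal.

/*
 * Read side of the encoding, in the style of rmap_get_first() above.
 * Reuses PTE_LIST_EXT, struct pte_list_desc, and struct kvm_rmap_head
 * from the previous sketch.
 */
struct rmap_iterator {
	struct pte_list_desc *desc;	/* current desc, NULL for the single-spte case */
	int pos;			/* index into desc->sptes[] */
};

static unsigned long long *rmap_get_first(struct kvm_rmap_head *rmap_head,
					  struct rmap_iterator *iter)
{
	if (!rmap_head->val)
		return NULL;				/* empty head */

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;			/* lone spte, no desc chain */
		return (unsigned long long *)rmap_head->val;
	}

	/* Bit 0 set: mask it off to recover the desc pointer. */
	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	return iter->desc->sptes[0];
}

static unsigned long long *rmap_get_next(struct rmap_iterator *iter)
{
	if (!iter->desc)
		return NULL;				/* single entry already consumed */

	/* Next slot in the current desc, if occupied. */
	if (iter->pos < PTE_LIST_EXT - 1 && iter->desc->sptes[iter->pos + 1])
		return iter->desc->sptes[++iter->pos];

	/* Otherwise move to the next desc in the chain. */
	iter->desc = iter->desc->more;
	iter->pos = 0;
	return iter->desc ? iter->desc->sptes[0] : NULL;
}

Callers then loop: for (sptep = rmap_get_first(head, &iter); sptep; sptep = rmap_get_next(&iter)) { ... }, which is the shape of the kernel's for_each_rmap_spte() macro used throughout the entries above.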