new_pmd 34 arch/arm/mm/pgd.c pmd_t *new_pmd, *init_pmd;
new_pmd 61 arch/arm/mm/pgd.c new_pmd = pmd_alloc(mm, new_pud, 0);
new_pmd 62 arch/arm/mm/pgd.c if (!new_pmd)
new_pmd 76 arch/arm/mm/pgd.c new_pmd = pmd_alloc(mm, new_pud, 0);
new_pmd 77 arch/arm/mm/pgd.c if (!new_pmd)
new_pmd 80 arch/arm/mm/pgd.c new_pte = pte_alloc_map(mm, new_pmd, 0);
new_pmd 90 arch/arm/mm/pgd.c pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
new_pmd 91 arch/arm/mm/pgd.c pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
new_pmd 106 arch/arm/mm/pgd.c pmd_free(mm, new_pmd);
new_pmd 972 arch/arm64/mm/mmu.c pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
new_pmd 976 arch/arm64/mm/mmu.c pmd_val(new_pmd)))
new_pmd 980 arch/arm64/mm/mmu.c set_pmd(pmdp, new_pmd);
new_pmd 150 arch/mips/kvm/mmu.c pmd_t *new_pmd;
new_pmd 154 arch/mips/kvm/mmu.c new_pmd = mmu_memory_cache_alloc(cache);
new_pmd 155 arch/mips/kvm/mmu.c pmd_init((unsigned long)new_pmd,
new_pmd 157 arch/mips/kvm/mmu.c pud_populate(NULL, pud, new_pmd);
new_pmd 570 arch/powerpc/kvm/book3s_64_mmu_radix.c pmd_t *pmd, *new_pmd = NULL;
new_pmd 586 arch/powerpc/kvm/book3s_64_mmu_radix.c new_pmd = kvmppc_pmd_alloc();
new_pmd 627 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!new_pmd) {
new_pmd 651 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!new_pmd)
new_pmd 653 arch/powerpc/kvm/book3s_64_mmu_radix.c pud_populate(kvm->mm, pud, new_pmd);
new_pmd 654 arch/powerpc/kvm/book3s_64_mmu_radix.c new_pmd = NULL;
new_pmd 731 arch/powerpc/kvm/book3s_64_mmu_radix.c if (new_pmd)
new_pmd 732 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmppc_pmd_free(new_pmd);
new_pmd 28 arch/powerpc/mm/book3s64/hash_hugepage.c unsigned long old_pmd, new_pmd;
new_pmd 49 arch/powerpc/mm/book3s64/hash_hugepage.c new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
new_pmd 51 arch/powerpc/mm/book3s64/hash_hugepage.c new_pmd |= _PAGE_DIRTY;
new_pmd 52 arch/powerpc/mm/book3s64/hash_hugepage.c } while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
new_pmd 60 arch/powerpc/mm/book3s64/hash_hugepage.c rflags = htab_convert_pte_flags(new_pmd);
new_pmd 134 arch/powerpc/mm/book3s64/hash_hugepage.c new_pmd |= H_PAGE_HASHPTE;
new_pmd 182 arch/powerpc/mm/book3s64/hash_hugepage.c new_pmd |= H_PAGE_COMBO;
new_pmd 189 arch/powerpc/mm/book3s64/hash_hugepage.c *pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
new_pmd 1150 arch/powerpc/mm/book3s64/radix_pgtable.c pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
new_pmd 1155 arch/powerpc/mm/book3s64/radix_pgtable.c set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
new_pmd 27 arch/unicore32/mm/pgd.c pmd_t *new_pmd, *init_pmd;
new_pmd 50 arch/unicore32/mm/pgd.c new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
new_pmd 51 arch/unicore32/mm/pgd.c if (!new_pmd)
new_pmd 54 arch/unicore32/mm/pgd.c new_pte = pte_alloc_map(mm, new_pmd, 0);
new_pmd 68 arch/unicore32/mm/pgd.c pmd_free(mm, new_pmd);
new_pmd 129 arch/x86/mm/kmmio.c pmd_t new_pmd;
new_pmd 133 arch/x86/mm/kmmio.c new_pmd = pmd_mknotpresent(*pmd);
new_pmd 136 arch/x86/mm/kmmio.c new_pmd = __pmd(*old);
new_pmd 138 arch/x86/mm/kmmio.c set_pmd(pmd, new_pmd);
new_pmd 46 include/linux/huge_mm.h pmd_t *old_pmd, pmd_t *new_pmd);
new_pmd 1866 mm/huge_memory.c pmd_t *old_pmd, pmd_t *new_pmd)
new_pmd 1882 mm/huge_memory.c if (WARN_ON(!pmd_none(*new_pmd))) {
new_pmd 1883 mm/huge_memory.c VM_BUG_ON(pmd_trans_huge(*new_pmd));
new_pmd 1893 mm/huge_memory.c new_ptl = pmd_lockptr(mm, new_pmd);
new_pmd 1899 mm/huge_memory.c VM_BUG_ON(!pmd_none(*new_pmd));
new_pmd 1904 mm/huge_memory.c pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
new_pmd 1907 mm/huge_memory.c set_pmd_at(mm, new_addr, new_pmd, pmd);
new_pmd 117 mm/mremap.c struct vm_area_struct *new_vma, pmd_t *new_pmd,
new_pmd 152 mm/mremap.c new_pte = pte_offset_map(new_pmd, new_addr);
new_pmd 153 mm/mremap.c new_ptl = pte_lockptr(mm, new_pmd);
new_pmd 197 mm/mremap.c pmd_t *old_pmd, pmd_t *new_pmd)
new_pmd 211 mm/mremap.c if (WARN_ON(!pmd_none(*new_pmd)))
new_pmd 219 mm/mremap.c new_ptl = pmd_lockptr(mm, new_pmd);
new_pmd 227 mm/mremap.c VM_BUG_ON(!pmd_none(*new_pmd));
new_pmd 230 mm/mremap.c set_pmd_at(mm, new_addr, new_pmd, pmd);
new_pmd 247 mm/mremap.c pmd_t *old_pmd, *new_pmd;
new_pmd 266 mm/mremap.c new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
new_pmd 267 mm/mremap.c if (!new_pmd)
new_pmd 276 mm/mremap.c old_end, old_pmd, new_pmd);
new_pmd 296 mm/mremap.c old_end, old_pmd, new_pmd);
new_pmd 304 mm/mremap.c if (pte_alloc(new_vma->vm_mm, new_pmd))
new_pmd 310 mm/mremap.c new_pmd, new_addr, need_rmap_locks);
new_pmd 195 virt/kvm/arm/mmu.c static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
new_pmd 197 virt/kvm/arm/mmu.c WRITE_ONCE(*pmdp, new_pmd);
new_pmd 1056 virt/kvm/arm/mmu.c *cache, phys_addr_t addr, const pmd_t *new_pmd)
new_pmd 1076 virt/kvm/arm/mmu.c if (pmd_val(old_pmd) == pmd_val(*new_pmd))
new_pmd 1108 virt/kvm/arm/mmu.c WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
new_pmd 1115 virt/kvm/arm/mmu.c kvm_set_pmd(pmd, *new_pmd);
new_pmd 1822 virt/kvm/arm/mmu.c pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
new_pmd 1824 virt/kvm/arm/mmu.c new_pmd = kvm_pmd_mkhuge(new_pmd);
new_pmd 1827 virt/kvm/arm/mmu.c new_pmd = kvm_s2pmd_mkwrite(new_pmd);
new_pmd 1830 virt/kvm/arm/mmu.c new_pmd = kvm_s2pmd_mkexec(new_pmd);
new_pmd 1832 virt/kvm/arm/mmu.c ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
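Most of the call sites above follow the same shape: allocate a new PMD-level table (pmd_alloc, kvmppc_pmd_alloc, mmu_memory_cache_alloc), bail out if the allocation failed, fill in the entry, and only then publish it into the parent level (pud_populate, set_pmd, set_pmd_at). Below is a minimal userspace sketch of that allocate/check/populate pattern; the mock_* types and helpers are illustrative stand-ins invented for this example, not the kernel's actual pmd_t/pud_t API.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define ENTRIES_PER_TABLE 512

	/* Mock table types standing in for the kernel's pmd/pud levels. */
	typedef struct { uint64_t entry[ENTRIES_PER_TABLE]; } mock_pmd_table;
	typedef struct { mock_pmd_table *pmd[ENTRIES_PER_TABLE]; } mock_pud_table;

	/* Allocate a zeroed PMD-level table; returns NULL on failure,
	 * mirroring the "if (!new_pmd) goto fail" checks above. */
	static mock_pmd_table *mock_pmd_alloc(void)
	{
		return calloc(1, sizeof(mock_pmd_table));
	}

	/* Publish the new table into its parent entry only after it is
	 * fully initialized, the role pud_populate()/set_pmd() play in
	 * the kernel call sites listed above. */
	static void mock_pud_populate(mock_pud_table *pud, unsigned int idx,
				      mock_pmd_table *new_pmd)
	{
		pud->pmd[idx] = new_pmd;
	}

	int main(void)
	{
		mock_pud_table pud = { { 0 } };
		mock_pmd_table *new_pmd = mock_pmd_alloc();

		if (!new_pmd)
			return 1;		/* allocation failed */

		new_pmd->entry[0] = 0x1234;	/* fill before publishing */
		mock_pud_populate(&pud, 0, new_pmd);

		printf("entry 0 = 0x%llx\n",
		       (unsigned long long)pud.pmd[0]->entry[0]);
		free(new_pmd);
		return 0;
	}

The fill-then-publish ordering matters in the real kernel code because other CPUs may be walking the page tables concurrently; publishing last (often with WRITE_ONCE, as in kvm_set_pmd above) ensures a walker never observes a partially initialized table.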