old_pmd           208 arch/arm64/mm/mmu.c 		pmd_t old_pmd = READ_ONCE(*pmdp);
old_pmd           221 arch/arm64/mm/mmu.c 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
old_pmd           227 arch/arm64/mm/mmu.c 			BUG_ON(pmd_val(old_pmd) != 0 &&
old_pmd           228 arch/arm64/mm/mmu.c 			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
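The four arch/arm64/mm/mmu.c hits above come from the page-table creation path: the code snapshots the entry into old_pmd, rewrites the slot, and then asserts that an already-populated slot either changed only in attribute bits (block-mapping case) or kept pointing at the same next-level table (table case). Below is a minimal userspace sketch of that invariant; it is a model, not kernel code, and entry_t, ATTR_SAFE_MASK and the helper names are invented for illustration.

#include <assert.h>
#include <stdint.h>

typedef uint64_t entry_t;

#define ATTR_SAFE_MASK 0x0c00ull	/* pretend only these bits may change on a live entry */

/* pgattr_change_is_safe() stand-in: slot was empty, or only safe bits differ. */
static int change_is_safe(entry_t old, entry_t new)
{
	return old == 0 || ((old ^ new) & ~ATTR_SAFE_MASK) == 0;
}

static void set_block_entry(entry_t *slot, entry_t new)
{
	entry_t old = *slot;			/* old_pmd = READ_ONCE(*pmdp)   */

	*slot = new;				/* the block mapping is rewritten */
	assert(change_is_safe(old, *slot));	/* first BUG_ON() above         */
}

static void populate_table_entry(entry_t *slot, entry_t table)
{
	entry_t old = *slot;

	if (old == 0)
		*slot = table;			/* next-level table installed on first use */
	assert(old == 0 || old == *slot);	/* second BUG_ON() above        */
}

int main(void)
{
	entry_t block = 0, table = 0;

	set_block_entry(&block, 0x200000 | 0x400);	/* first population          */
	set_block_entry(&block, 0x200000 | 0x800);	/* attribute-only change: ok */
	populate_table_entry(&table, 0x3000);		/* install next-level table  */
	populate_table_entry(&table, 0x4000);		/* second call: slot is kept */
	return 0;
}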
old_pmd           236 arch/powerpc/include/asm/book3s/64/hash.h 				   pmd_t *pmdp, unsigned long old_pmd);
old_pmd           240 arch/powerpc/include/asm/book3s/64/hash.h 					  unsigned long old_pmd)
old_pmd            28 arch/powerpc/mm/book3s64/hash_hugepage.c 	unsigned long old_pmd, new_pmd;
old_pmd            38 arch/powerpc/mm/book3s64/hash_hugepage.c 		old_pmd = pmd_val(pmd);
old_pmd            40 arch/powerpc/mm/book3s64/hash_hugepage.c 		if (unlikely(old_pmd & H_PAGE_BUSY))
old_pmd            43 arch/powerpc/mm/book3s64/hash_hugepage.c 		if (unlikely(!check_pte_access(access, old_pmd)))
old_pmd            49 arch/powerpc/mm/book3s64/hash_hugepage.c 		new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
old_pmd            52 arch/powerpc/mm/book3s64/hash_hugepage.c 	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
old_pmd            57 arch/powerpc/mm/book3s64/hash_hugepage.c 	if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
old_pmd            87 arch/powerpc/mm/book3s64/hash_hugepage.c 		if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
old_pmd           133 arch/powerpc/mm/book3s64/hash_hugepage.c 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
old_pmd           165 arch/powerpc/mm/book3s64/hash_hugepage.c 			*pmdp = __pmd(old_pmd);
old_pmd           167 arch/powerpc/mm/book3s64/hash_hugepage.c 					   psize, lpsize, old_pmd);
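The arch/powerpc/mm/book3s64/hash_hugepage.c hits above show a lock-free update loop in __hash_page_thp(): snapshot the PMD into old_pmd, give up if another CPU already holds H_PAGE_BUSY, otherwise try to install old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED with pmd_xchg(), retrying if the entry changed underneath. The sketch below models only the shape of that loop in userspace with C11 atomics; the flag values and the try_lock_entry() name are invented and the permission check is reduced to a comment.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_BUSY     (1ull << 0)
#define PTE_ACCESSED (1ull << 1)
#define PTE_DIRTY    (1ull << 2)
#define ACC_WRITE    (1u << 0)

/* Try to mark the entry busy+accessed; false means "busy, let the caller retry". */
static bool try_lock_entry(_Atomic uint64_t *slot, unsigned int access)
{
	uint64_t old, new;

	do {
		old = atomic_load(slot);	/* old_pmd = pmd_val(READ_ONCE(*pmdp)) */
		if (old & PTE_BUSY)		/* another CPU is updating this entry  */
			return false;

		/* check_pte_access() would reject a disallowed access here
		 * (omitted in this model). */

		new = old | PTE_BUSY | PTE_ACCESSED;
		if (access & ACC_WRITE)
			new |= PTE_DIRTY;
		/* pmd_xchg(): only succeeds if *slot still holds the snapshot. */
	} while (!atomic_compare_exchange_weak(slot, &old, new));

	return true;
}

int main(void)
{
	_Atomic uint64_t slot = 0x1000;

	if (try_lock_entry(&slot, ACC_WRITE))
		printf("locked entry: %#llx\n", (unsigned long long)atomic_load(&slot));
	return 0;
}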
old_pmd           312 arch/powerpc/mm/book3s64/hash_pgtable.c 			    pmd_t *pmdp, unsigned long old_pmd)
old_pmd           324 arch/powerpc/mm/book3s64/hash_pgtable.c 	if (old_pmd & H_PAGE_COMBO)
old_pmd           347 arch/powerpc/mm/book3s64/hash_pgtable.c 	pmd_t old_pmd;
old_pmd           353 arch/powerpc/mm/book3s64/hash_pgtable.c 	old_pmd = __pmd(old);
old_pmd           377 arch/powerpc/mm/book3s64/hash_pgtable.c 	return old_pmd;
old_pmd           108 arch/powerpc/mm/book3s64/pgtable.c 	unsigned long old_pmd;
old_pmd           110 arch/powerpc/mm/book3s64/pgtable.c 	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
old_pmd           120 arch/powerpc/mm/book3s64/pgtable.c 	return __pmd(old_pmd);
old_pmd          1011 arch/powerpc/mm/book3s64/radix_pgtable.c 	pmd_t old_pmd;
old_pmd          1015 arch/powerpc/mm/book3s64/radix_pgtable.c 	old_pmd = __pmd(old);
old_pmd          1027 arch/powerpc/mm/book3s64/radix_pgtable.c 	return old_pmd;
old_pmd           258 include/asm-generic/pgtable.h 	pmd_t old_pmd = *pmdp;
old_pmd           259 include/asm-generic/pgtable.h 	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
old_pmd           321 include/asm-generic/pgtable.h 	pmd_t old_pmd = *pmdp;
old_pmd           323 include/asm-generic/pgtable.h 	return old_pmd;
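The include/asm-generic/pgtable.h hits are the generic fallbacks built on the same snapshot-then-rewrite shape: pmdp_set_wrprotect() rewrites the entry with the writable bit cleared, and a generic pmdp_establish() implementation installs a new entry while returning the old one to the caller. Below is a tiny userspace model of those two operations; entry_t and ENTRY_WRITE are invented, and plain stores stand in for set_pmd_at() under the page-table lock.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t entry_t;

#define ENTRY_WRITE (1ull << 1)		/* illustrative bit layout */

static void entry_set_wrprotect(entry_t *slot)
{
	entry_t old = *slot;		/* pmd_t old_pmd = *pmdp;                   */
	*slot = old & ~ENTRY_WRITE;	/* set_pmd_at(..., pmd_wrprotect(old_pmd)); */
}

static entry_t entry_establish(entry_t *slot, entry_t new)
{
	entry_t old = *slot;		/* pmd_t old_pmd = *pmdp;          */
	*slot = new;			/* set_pmd_at(..., pmd);           */
	return old;			/* caller gets the old entry back  */
}

int main(void)
{
	entry_t slot = 0x1000 | ENTRY_WRITE;

	entry_set_wrprotect(&slot);
	printf("after wrprotect: %#llx\n", (unsigned long long)slot);
	printf("previous entry:  %#llx\n", (unsigned long long)entry_establish(&slot, 0x2000));
	return 0;
}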
old_pmd            46 include/linux/huge_mm.h 			 pmd_t *old_pmd, pmd_t *new_pmd);
old_pmd          1866 mm/huge_memory.c 		  pmd_t *old_pmd, pmd_t *new_pmd)
old_pmd          1891 mm/huge_memory.c 	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
old_pmd          1896 mm/huge_memory.c 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
old_pmd          1903 mm/huge_memory.c 			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
old_pmd          2145 mm/huge_memory.c 	pmd_t old_pmd, _pmd;
old_pmd          2210 mm/huge_memory.c 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
old_pmd          2212 mm/huge_memory.c 	pmd_migration = is_pmd_migration_entry(old_pmd);
old_pmd          2216 mm/huge_memory.c 		entry = pmd_to_swp_entry(old_pmd);
old_pmd          2220 mm/huge_memory.c 		soft_dirty = pmd_swp_soft_dirty(old_pmd);
old_pmd          2222 mm/huge_memory.c 		page = pmd_page(old_pmd);
old_pmd          2223 mm/huge_memory.c 		if (pmd_dirty(old_pmd))
old_pmd          2225 mm/huge_memory.c 		write = pmd_write(old_pmd);
old_pmd          2226 mm/huge_memory.c 		young = pmd_young(old_pmd);
old_pmd          2227 mm/huge_memory.c 		soft_dirty = pmd_soft_dirty(old_pmd);
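The mm/huge_memory.c hits from __split_huge_pmd_locked() show why the old value is captured at all: the huge PMD is invalidated first, and every property of the replacement PTEs (the migration target or the page, plus dirty, write, young and soft-dirty) is then read out of the old_pmd snapshot rather than out of the now-invalidated slot. The userspace sketch below models that snapshot-and-decode step; the bit names, the split_state struct and the pfn encoding are all invented.

#include <stdbool.h>
#include <stdint.h>

#define E_INVALID   (1ull << 0)
#define E_MIGRATION (1ull << 1)
#define E_DIRTY     (1ull << 2)
#define E_WRITE     (1ull << 3)
#define E_YOUNG     (1ull << 4)
#define E_SOFTDIRTY (1ull << 5)

struct split_state {
	bool migration, dirty, write, young, soft_dirty;
	uint64_t target;			/* page frame / migration destination */
};

static uint64_t invalidate_entry(uint64_t *slot)
{
	uint64_t old = *slot;			/* old_pmd = pmdp_invalidate(...)  */

	*slot = old | E_INVALID;		/* entry stays but is no longer valid */
	return old;
}

static struct split_state snapshot_for_split(uint64_t *slot)
{
	uint64_t old = invalidate_entry(slot);
	struct split_state s = { .migration = old & E_MIGRATION, .target = old >> 12 };

	if (s.migration) {			/* is_pmd_migration_entry(old_pmd)        */
		s.young = false;		/* migration entries carry no young bit   */
		s.soft_dirty = old & E_SOFTDIRTY;  /* pmd_swp_soft_dirty(old_pmd)         */
	} else {				/* normal huge page: pmd_page(old_pmd)    */
		s.dirty = old & E_DIRTY;
		s.write = old & E_WRITE;
		s.young = old & E_YOUNG;
		s.soft_dirty = old & E_SOFTDIRTY;
	}
	return s;
}

int main(void)
{
	uint64_t slot = (0x1234ull << 12) | E_WRITE | E_DIRTY;
	struct split_state s = snapshot_for_split(&slot);

	return s.write && s.dirty ? 0 : 1;
}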
old_pmd           115 mm/mremap.c    static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
old_pmd           151 mm/mremap.c    	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
old_pmd           197 mm/mremap.c    		  pmd_t *old_pmd, pmd_t *new_pmd)
old_pmd           218 mm/mremap.c    	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
old_pmd           224 mm/mremap.c    	pmd = *old_pmd;
old_pmd           225 mm/mremap.c    	pmd_clear(old_pmd);
old_pmd           247 mm/mremap.c    	pmd_t *old_pmd, *new_pmd;
old_pmd           263 mm/mremap.c    		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
old_pmd           264 mm/mremap.c    		if (!old_pmd)
old_pmd           269 mm/mremap.c    		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
old_pmd           276 mm/mremap.c    						    old_end, old_pmd, new_pmd);
old_pmd           282 mm/mremap.c    			split_huge_pmd(vma, old_pmd, old_addr);
old_pmd           283 mm/mremap.c    			if (pmd_trans_unstable(old_pmd))
old_pmd           296 mm/mremap.c    					old_end, old_pmd, new_pmd);
old_pmd           309 mm/mremap.c    		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
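The mm/mremap.c hits trace the dispatch inside move_page_tables(): step through the old range one PMD at a time, skip holes, try to move a huge or swap PMD as a unit via move_huge_pmd(), split it when that is not possible, and fall back to move_ptes() for the ordinary case. The sketch below is a structural userspace model of that loop only; every helper in it is an invented stub standing in for the corresponding kernel call, and the 2 MiB chunk size is an assumption.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK (2ul << 20)		/* assume one PMD maps 2 MiB */

static uint64_t table[4];		/* one fake PMD slot per chunk of the old range */

/* get_old_pmd() stand-in: NULL means a hole that the walk just skips. */
static uint64_t *lookup_entry(unsigned long addr)
{
	uint64_t *e = &table[(addr / CHUNK) % 4];
	return *e ? e : NULL;
}

/* is_swap_pmd()/pmd_trans_huge()/pmd_devmap() stand-in. */
static bool entry_is_huge(uint64_t *e)
{
	return *e & 1;
}

/* move_huge_pmd() stand-in: pretend the whole-PMD move always succeeds. */
static bool move_huge(uint64_t *e, unsigned long from, unsigned long to)
{
	(void)e;
	printf("move huge entry %#lx -> %#lx\n", from, to);
	return true;
}

/* split_huge_pmd() stand-in: demote to a normal entry. */
static void split_huge(uint64_t *e)
{
	*e &= ~1ull;
}

/* move_ptes() stand-in. */
static void move_small(unsigned long from, unsigned long to)
{
	printf("move ptes       %#lx -> %#lx\n", from, to);
}

static void move_tables(unsigned long old_addr, unsigned long new_addr,
			unsigned long old_end)
{
	for (; old_addr < old_end; old_addr += CHUNK, new_addr += CHUNK) {
		uint64_t *old_e = lookup_entry(old_addr);

		if (!old_e)
			continue;			/* nothing mapped here      */

		if (entry_is_huge(old_e)) {
			if (move_huge(old_e, old_addr, new_addr))
				continue;		/* moved as one unit        */
			split_huge(old_e);		/* else split and fall back */
		}
		move_small(old_addr, new_addr);		/* per-PTE copy             */
	}
}

int main(void)
{
	table[0] = 0x100000 | 1;	/* a huge entry   */
	table[1] = 0x200000;		/* a normal entry */
	move_tables(0, 4 * CHUNK, 2 * CHUNK);
	return 0;
}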
old_pmd           279 virt/kvm/arm/mmu.c 				pmd_t old_pmd = *pmd;
old_pmd           284 virt/kvm/arm/mmu.c 				kvm_flush_dcache_pmd(old_pmd);
old_pmd          1058 virt/kvm/arm/mmu.c 	pmd_t *pmd, old_pmd;
old_pmd          1064 virt/kvm/arm/mmu.c 	old_pmd = *pmd;
old_pmd          1076 virt/kvm/arm/mmu.c 	if (pmd_val(old_pmd) == pmd_val(*new_pmd))
old_pmd          1079 virt/kvm/arm/mmu.c 	if (pmd_present(old_pmd)) {
old_pmd          1093 virt/kvm/arm/mmu.c 		if (!pmd_thp_or_huge(old_pmd)) {
old_pmd          1108 virt/kvm/arm/mmu.c 		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
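The virt/kvm/arm/mmu.c hits at the end follow the break-before-make discipline for stage-2 huge mappings: if the new entry equals the old one there is nothing to do; if a different entry is live, it is cleared and the TLB invalidated before the replacement goes in, and a live entry that would suddenly map a different pfn draws a WARN_ON_ONCE(). The userspace sketch below models that sequence; E_VALID, PFN() and the puts() standing in for the TLB flush are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define E_VALID (1ull << 0)
#define PFN(e)  ((e) >> 12)

static void tlb_flush(void) { puts("tlb flush"); }	/* kvm_tlb_flush_vmid_ipa() stand-in */

static void set_huge_entry(uint64_t *slot, uint64_t new)
{
	uint64_t old = *slot;			/* old_pmd = *pmd;              */

	if (old == new)				/* same mapping: nothing to do  */
		return;

	if (old & E_VALID) {
		/* A live entry may only be replaced by one mapping the same
		 * physical page (e.g. a permission change). */
		assert(PFN(old) == PFN(new));	/* WARN_ON_ONCE(...) stand-in   */

		*slot = 0;			/* pmd_clear(pmd)               */
		tlb_flush();			/* break before make            */
	}

	*slot = new;				/* install the replacement      */
}

int main(void)
{
	uint64_t slot = 0;

	set_huge_entry(&slot, (0x1234ull << 12) | E_VALID);		  /* first mapping        */
	set_huge_entry(&slot, (0x1234ull << 12) | E_VALID | (1ull << 7)); /* same pfn, new perms  */
	return 0;
}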