/linux-4.1.27/arch/s390/include/asm/ |
D | pgtable.h |
     42  #define update_mmu_cache(vma, address, ptep) do { } while (0)    argument
     43  #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)    argument
    579  static inline pgste_t pgste_get_lock(pte_t *ptep)    in pgste_get_lock()  argument
    593  : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])    in pgste_get_lock()
    594  : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");    in pgste_get_lock()
    599  static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)    in pgste_set_unlock()  argument
    605  : "=Q" (ptep[PTRS_PER_PTE])    in pgste_set_unlock()
    606  : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])    in pgste_set_unlock()
    612  static inline pgste_t pgste_get(pte_t *ptep)    in pgste_get()  argument
    616  pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);    in pgste_get()
    [all …]
|
D | hugetlb.h |
     20  pte_t *ptep, pte_t pte);
     21  pte_t huge_ptep_get(pte_t *ptep);
     23  unsigned long addr, pte_t *ptep);
     46  pte_t *ptep)    in huge_pte_clear()  argument
     48  pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;    in huge_pte_clear()
     52  unsigned long address, pte_t *ptep)    in huge_ptep_clear_flush()  argument
     54  huge_ptep_get_and_clear(vma->vm_mm, address, ptep);    in huge_ptep_clear_flush()
     58  unsigned long addr, pte_t *ptep,    in huge_ptep_set_access_flags()  argument
     61  int changed = !pte_same(huge_ptep_get(ptep), pte);    in huge_ptep_set_access_flags()
     63  huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);    in huge_ptep_set_access_flags()
    [all …]
|
D | tlb.h | 144 #define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0) argument
|
/linux-4.1.27/arch/arm/include/asm/ |
D | hugetlb-3level.h | 32 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument 34 pte_t retval = *ptep; in huge_ptep_get() 41 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 43 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 47 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 49 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush() 53 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 55 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 59 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 61 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() [all …]
|
D | proc-fns.h | 69 void (*set_pte_ext)(pte_t *ptep, pte_t pte); 71 void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); 87 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); 89 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
|
D | pgalloc.h | 149 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) in pmd_populate_kernel() argument 154 __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE); in pmd_populate_kernel() 158 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) in pmd_populate() argument 168 __pmd_populate(pmdp, page_to_phys(ptep), prot); in pmd_populate()
|
D | pgtable-2level.h | 185 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) argument
|
D | tlb.h | 184 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) in tlb_remove_tlb_entry() argument 260 #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) argument
|
D | pgtable.h | 215 #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) argument 242 pte_t *ptep, pte_t pteval) in set_pte_at() argument 252 set_pte_ext(ptep, pteval, ext); in set_pte_at()
|
D | kvm_mmu.h | 160 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) argument
|
D | pgtable-3level.h | 206 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) argument
|
D | tlbflush.h | 632 pte_t *ptep); 635 unsigned long addr, pte_t *ptep) in update_mmu_cache() argument
|
/linux-4.1.27/arch/x86/include/asm/ |
D | hugetlb.h | 42 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 44 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 48 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 50 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 54 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 56 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush() 70 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 72 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 76 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 79 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() [all …]
|
D | pgtable-3level.h |
     27  static inline void native_set_pte(pte_t *ptep, pte_t pte)    in native_set_pte()  argument
     29  ptep->pte_high = pte.pte_high;    in native_set_pte()
     31  ptep->pte_low = pte.pte_low;    in native_set_pte()
     88  static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)    in native_set_pte_atomic()  argument
     90  set_64bit((unsigned long long *)(ptep), native_pte_val(pte));    in native_set_pte_atomic()
    109  pte_t *ptep)    in native_pte_clear()  argument
    111  ptep->pte_low = 0;    in native_pte_clear()
    113  ptep->pte_high = 0;    in native_pte_clear()
    141  static inline pte_t native_ptep_get_and_clear(pte_t *ptep)    in native_ptep_get_and_clear()  argument
    146  res.pte_low = xchg(&ptep->pte_low, 0);    in native_ptep_get_and_clear()
    [all …]
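The two halves written at lines 29 and 31 above are separated by a write barrier in the original header (the barrier line itself is not quoted here). A hedged sketch of that PAE split store, with the assumed smp_wmb() marked as such:

    static inline void native_set_pte(pte_t *ptep, pte_t pte)
    {
        ptep->pte_high = pte.pte_high;  /* line 29 above: publish the high word first */
        smp_wmb();                      /* assumed barrier (elided line 30) */
        ptep->pte_low  = pte.pte_low;   /* line 31 above: the low word holds the present
                                           bit, so the entry only becomes visible once
                                           both halves are consistent */
    }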
|
D | pgtable.h | 39 #define set_pte(ptep, pte) native_set_pte(ptep, pte) argument 40 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) argument 43 #define set_pte_atomic(ptep, pte) \ argument 44 native_set_pte_atomic(ptep, pte) 61 #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) argument 64 #define pte_update(mm, addr, ptep) do { } while (0) argument 65 #define pte_update_defer(mm, addr, ptep) do { } while (0) argument 66 #define pmd_update(mm, addr, ptep) do { } while (0) argument 67 #define pmd_update_defer(mm, addr, ptep) do { } while (0) argument 670 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) in native_local_ptep_get_and_clear() argument [all …]
|
D | paravirt.h | 404 pte_t *ptep) in pte_update() argument 406 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); in pte_update() 415 pte_t *ptep) in pte_update_defer() argument 417 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep); in pte_update_defer() 486 pte_t *ptep) in ptep_modify_prot_start() argument 491 mm, addr, ptep); in ptep_modify_prot_start() 497 pte_t *ptep, pte_t pte) in ptep_modify_prot_commit() argument 501 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte); in ptep_modify_prot_commit() 504 mm, addr, ptep, pte.pte); in ptep_modify_prot_commit() 507 static inline void set_pte(pte_t *ptep, pte_t pte) in set_pte() argument [all …]
|
D | pgtable_64.h | 48 pte_t *ptep) in native_pte_clear() argument 50 *ptep = native_make_pte(0); in native_pte_clear() 53 static inline void native_set_pte(pte_t *ptep, pte_t pte) in native_set_pte() argument 55 *ptep = pte; in native_set_pte() 58 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) in native_set_pte_atomic() argument 60 native_set_pte(ptep, pte); in native_set_pte_atomic()
|
D | pgtable-2level.h | 14 static inline void native_set_pte(pte_t *ptep , pte_t pte) in native_set_pte() argument 16 *ptep = pte; in native_set_pte() 24 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) in native_set_pte_atomic() argument 26 native_set_pte(ptep, pte); in native_set_pte_atomic()
|
D | pgtable_32.h | 60 #define kpte_clear_flush(ptep, vaddr) \ argument 62 pte_clear(&init_mm, (vaddr), (ptep)); \
|
D | paravirt_types.h | 271 void (*set_pte)(pte_t *ptep, pte_t pteval); 273 pte_t *ptep, pte_t pteval); 278 pte_t *ptep); 280 unsigned long addr, pte_t *ptep); 287 pte_t *ptep); 289 pte_t *ptep, pte_t pte); 299 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); 301 pte_t *ptep);
|
D | tlb.h | 6 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
D | io.h | 324 extern bool is_early_ioremap_ptep(pte_t *ptep);
|
/linux-4.1.27/arch/mips/include/asm/ |
D | hugetlb.h | 55 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 57 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 61 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 64 pte_t pte = *ptep; in huge_ptep_get_and_clear() 67 set_pte_at(mm, addr, ptep, clear); in huge_ptep_get_and_clear() 72 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 89 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 91 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 96 pte_t *ptep, pte_t pte, in huge_ptep_set_access_flags() argument 99 int changed = !pte_same(*ptep, pte); in huge_ptep_set_access_flags() [all …]
|
D | pgtable.h | 131 pte_t *ptep, pte_t pteval); 139 static inline void set_pte(pte_t *ptep, pte_t pte) in set_pte() argument 141 ptep->pte_high = pte.pte_high; in set_pte() 143 ptep->pte_low = pte.pte_low; in set_pte() 146 pte_t *buddy = ptep_buddy(ptep); in set_pte() 156 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in pte_clear() argument 162 if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL) in pte_clear() 165 set_pte_at(mm, addr, ptep, null); in pte_clear() 179 static inline void set_pte(pte_t *ptep, pte_t pteval) in set_pte() argument 181 *ptep = pteval; in set_pte() [all …]
|
D | tlb.h | 14 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/arm64/include/asm/ |
D | hugetlb.h | 28 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument 30 return *ptep; in huge_ptep_get() 34 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 36 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 40 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 42 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush() 46 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 48 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 52 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 54 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() [all …]
|
D | pgalloc.h | 121 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) in pmd_populate_kernel() argument 126 __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE); in pmd_populate_kernel() 130 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) in pmd_populate() argument 132 __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE); in pmd_populate()
|
D | kvm_mmu.h | 104 #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) 207 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
|
D | pgtable.h | 125 #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) argument 200 static inline void set_pte(pte_t *ptep, pte_t pte) in set_pte() argument 202 *ptep = pte; in set_pte() 217 pte_t *ptep, pte_t pte) in set_pte_at() argument 228 set_pte(ptep, pte); in set_pte_at()
|
D | tlbflush.h | 163 unsigned long addr, pte_t *ptep) in update_mmu_cache() argument
|
/linux-4.1.27/arch/metag/include/asm/ |
D | hugetlb.h | 30 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 32 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 36 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 38 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 42 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 57 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 59 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 63 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 66 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 69 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument [all …]
|
D | pgtable.h | 100 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | hugetlb.h | 28 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 30 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 34 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 36 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 40 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 55 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 57 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 61 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 64 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 67 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument [all …]
|
D | pgtable.h |
    329  static inline void set_pte(pte_t *ptep, pte_t pteval)    in set_pte()  argument
    336  (!pte_present(*ptep) ||    in set_pte()
    337  pte_pfn(*ptep) != pte_pfn(pteval)))    in set_pte()
    340  *ptep = pteval;    in set_pte()
    343  #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)    argument
    409  ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)    in ptep_test_and_clear_young()  argument
    412  if (!pte_young(*ptep))    in ptep_test_and_clear_young()
    414  return test_and_clear_bit(_PAGE_A_BIT, ptep);    in ptep_test_and_clear_young()
    416  pte_t pte = *ptep;    in ptep_test_and_clear_young()
    419  set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));    in ptep_test_and_clear_young()
    [all …]
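The fragments at lines 409-419 are two branches of a single accessor: an atomic bit-clear on one side and a plain read-modify-write on the other. A sketch pieced together from those fragments; the CONFIG_SMP split is an assumption inferred from the use of test_and_clear_bit() versus set_pte_at():

    static inline int
    ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
    {
    #ifdef CONFIG_SMP
        if (!pte_young(*ptep))
            return 0;
        /* clear the accessed bit atomically, in place */
        return test_and_clear_bit(_PAGE_A_BIT, ptep);
    #else
        pte_t pte = *ptep;
        if (!pte_young(pte))
            return 0;
        set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return 1;
    #endif
    }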
|
D | tlb.h | 247 __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address) in __tlb_remove_tlb_entry() argument 259 #define tlb_remove_tlb_entry(tlb, ptep, addr) \ argument 262 __tlb_remove_tlb_entry(tlb, ptep, addr); \ 265 #define pte_free_tlb(tlb, ptep, address) \ argument 268 __pte_free_tlb(tlb, ptep, address); \ 271 #define pmd_free_tlb(tlb, ptep, address) \ argument 274 __pmd_free_tlb(tlb, ptep, address); \
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | hugetlb.h | 9 pte_t *ptep, pte_t pte); 12 pte_t *ptep); 47 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 62 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 64 pte_t old_pte = *ptep; in huge_ptep_set_wrprotect() 65 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); in huge_ptep_set_wrprotect() 69 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 72 int changed = !pte_same(*ptep, pte); in huge_ptep_set_access_flags() 74 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags() 80 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument [all …]
|
D | pgtable_32.h | 115 static inline void set_pte(pte_t *ptep, pte_t pteval) in set_pte() argument 117 srmmu_swap((unsigned long *)ptep, pte_val(pteval)); in set_pte() 120 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 154 static inline void __pte_clear(pte_t *ptep) in __pte_clear() argument 156 set_pte(ptep, __pte(0)); in __pte_clear() 159 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in pte_clear() argument 161 __pte_clear(ptep); in pte_clear() 344 #define update_mmu_cache(vma, address, ptep) do { } while (0) argument
|
D | pgtable_64.h | 789 static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) in pmd_set() argument 791 unsigned long val = __pa((unsigned long) (ptep)); in pmd_set() 866 pte_t *ptep, pte_t orig, int fullmm); 879 pte_t *ptep, pte_t pte, int fullmm) in __set_pte_at() argument 881 pte_t orig = *ptep; in __set_pte_at() 883 *ptep = pte; in __set_pte_at() 892 tlb_batch_add(mm, addr, ptep, orig, fullmm); in __set_pte_at() 895 #define set_pte_at(mm,addr,ptep,pte) \ argument 896 __set_pte_at((mm), (addr), (ptep), (pte), 0) 898 #define pte_clear(mm,addr,ptep) \ argument [all …]
|
D | pgalloc_32.h | 54 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep); 57 void pmd_set(pmd_t *pmdp, pte_t *ptep);
|
D | tlb_64.h | 27 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/sh/include/asm/ |
D | hugetlb.h | 41 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 43 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 47 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 49 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 53 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 68 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 70 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 74 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 77 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 80 static inline pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument [all …]
|
D | tlb.h | 60 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) in tlb_remove_tlb_entry() argument 112 #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) argument
|
D | pgtable.h | 143 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in update_mmu_cache() argument 145 pte_t pte = *ptep; in update_mmu_cache()
|
D | pgtable_32.h | 299 static inline void set_pte(pte_t *ptep, pte_t pte) in set_pte() argument 301 ptep->pte_high = pte.pte_high; in set_pte() 303 ptep->pte_low = pte.pte_low; in set_pte() 309 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument
|
D | pgtable_64.h | 44 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument
|
/linux-4.1.27/arch/s390/mm/ |
D | hugetlbpage.c | 87 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 98 *(pmd_t *) ptep = pmd; in set_huge_pte_at() 101 pte_t huge_ptep_get(pte_t *ptep) in huge_ptep_get() argument 106 pmd = *(pmd_t *) ptep; in huge_ptep_get() 118 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 120 pmd_t *pmdp = (pmd_t *) ptep; in huge_ptep_get_and_clear() 121 pte_t pte = huge_ptep_get(ptep); in huge_ptep_get_and_clear() 132 pte_t *ptep; in arch_prepare_hugepage() local 138 ptep = (pte_t *) pte_alloc_one(&init_mm, addr); in arch_prepare_hugepage() 139 if (!ptep) in arch_prepare_hugepage() [all …]
|
D | pageattr.c | 48 pte_t *ptep; in walk_page_table() local 59 ptep = pte_offset_kernel(pmdp, addr); in walk_page_table() 60 if (pte_none(*ptep)) in walk_page_table() 62 return ptep; in walk_page_table() 68 pte_t *ptep, pte; in change_page_attr() local 72 ptep = walk_page_table(addr); in change_page_attr() 73 if (WARN_ON_ONCE(!ptep)) in change_page_attr() 75 pte = *ptep; in change_page_attr() 77 __ptep_ipte(addr, ptep); in change_page_attr() 78 *ptep = pte; in change_page_attr()
|
D | pgtable.c | 627 pte_t *ptep, pte; in __gmap_zap() local 638 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap() 639 if (unlikely(!ptep)) in __gmap_zap() 641 pte = *ptep; in __gmap_zap() 645 pgste = pgste_get_lock(ptep); in __gmap_zap() 651 pte_clear(gmap->mm, vmaddr, ptep); in __gmap_zap() 653 pgste_set_unlock(ptep, pgste); in __gmap_zap() 655 pte_unmap_unlock(ptep, ptl); in __gmap_zap() 725 pte_t *ptep, entry; in gmap_ipte_notify() local 748 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify() [all …]
|
D | gup.c | 24 pte_t *ptep, pte; in gup_pte_range() local 29 ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); in gup_pte_range() 31 pte = *ptep; in gup_pte_range() 39 if (unlikely(pte_val(pte) != pte_val(*ptep))) { in gup_pte_range() 46 } while (ptep++, addr += PAGE_SIZE, addr != end); in gup_pte_range()
|
/linux-4.1.27/arch/tile/include/asm/ |
D | hugetlb.h | 56 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 58 set_pte(ptep, pte); in set_huge_pte_at() 62 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 64 return ptep_get_and_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 68 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 70 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush() 84 unsigned long addr, pte_t *ptep) in huge_ptep_set_wrprotect() argument 86 ptep_set_wrprotect(mm, addr, ptep); in huge_ptep_set_wrprotect() 90 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 93 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() [all …]
|
D | pgtable_64.h | 149 unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 151 return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >> in ptep_test_and_clear_young() 157 unsigned long addr, pte_t *ptep) in ptep_set_wrprotect() argument 159 __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE); in ptep_set_wrprotect() 164 unsigned long addr, pte_t *ptep) in ptep_get_and_clear() argument 166 return hv_pte(__insn_exch(&ptep->val, 0UL)); in ptep_get_and_clear()
|
D | pgtable.h | 176 static inline void __pte_clear(pte_t *ptep) in __pte_clear() argument 179 ptep->val = 0; in __pte_clear() 181 u32 *tmp = (u32 *)ptep; in __pte_clear() 187 #define pte_clear(mm, addr, ptep) __pte_clear(ptep) argument 250 void __set_pte(pte_t *ptep, pte_t pte); 258 extern void set_pte(pte_t *ptep, pte_t pte); 259 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument 338 #define kpte_clear_flush(ptep, vaddr) \ argument 340 pte_clear(&init_mm, (vaddr), (ptep)); \ 526 pte_t *ptep, void **datap);
|
D | pgtable_32.h | 103 unsigned long addr, pte_t *ptep) in ptep_get_and_clear() argument 105 pte_t pte = *ptep; in ptep_get_and_clear() 106 pte_clear(_mm, addr, ptep); in ptep_get_and_clear()
|
D | mmu_context.h | 48 pte_t *ptep = virt_to_kpte((unsigned long)pgdir); in install_page_table() local 49 __install_page_table(pgdir, asid, *ptep); in install_page_table()
|
D | pgalloc.h | 51 pmd_t *pmd, pte_t *ptep) in pmd_populate_kernel() argument 53 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)), in pmd_populate_kernel()
|
D | tlb.h | 20 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | pgtable.h | 109 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 118 pte_t *ptep, pte_t pte, int percpu) in __set_pte_at() argument 128 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) in __set_pte_at() 131 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); in __set_pte_at() 143 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) in __set_pte_at() 148 if (pte_val(*ptep) & _PAGE_HASHPTE) in __set_pte_at() 149 flush_hash_entry(mm, ptep, addr); in __set_pte_at() 155 : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) in __set_pte_at() 164 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) in __set_pte_at() 171 *ptep = pte; in __set_pte_at() [all …]
|
D | hugetlb.h | 121 pte_t *ptep, pte_t pte) in set_huge_pte_at() argument 123 set_pte_at(mm, addr, ptep, pte); in set_huge_pte_at() 127 unsigned long addr, pte_t *ptep) in huge_ptep_get_and_clear() argument 130 return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); in huge_ptep_get_and_clear() 132 return __pte(pte_update(ptep, ~0UL, 0)); in huge_ptep_get_and_clear() 137 unsigned long addr, pte_t *ptep) in huge_ptep_clear_flush() argument 140 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush() 155 unsigned long addr, pte_t *ptep, in huge_ptep_set_access_flags() argument 164 ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 167 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() [all …]
|
D | pgtable-ppc64.h | 216 pte_t *ptep, unsigned long pte, int huge); 221 pte_t *ptep, unsigned long clr, in pte_update() argument 236 : "=&r" (old), "=&r" (tmp), "=m" (*ptep) in pte_update() 237 : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) in pte_update() 240 unsigned long old = pte_val(*ptep); in pte_update() 241 *ptep = __pte((old & ~clr) | set); in pte_update() 249 hpte_need_flush(mm, addr, ptep, old, huge); in pte_update() 256 unsigned long addr, pte_t *ptep) in __ptep_test_and_clear_young() argument 260 if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) in __ptep_test_and_clear_young() 262 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); in __ptep_test_and_clear_young() [all …]
|
D | pgtable-ppc32.h | 127 #define pte_clear(mm, addr, ptep) \ argument 128 do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0) 147 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, 251 static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) in __ptep_test_and_clear_young() argument 254 old = pte_update(ptep, _PAGE_ACCESSED, 0); in __ptep_test_and_clear_young() 257 unsigned long ptephys = __pa(ptep) & PAGE_MASK; in __ptep_test_and_clear_young() 268 pte_t *ptep) in ptep_get_and_clear() argument 270 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); in ptep_get_and_clear() 275 pte_t *ptep) in ptep_set_wrprotect() argument 277 pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO); in ptep_set_wrprotect() [all …]
|
D | tlb.h | 37 extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, 40 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, in __tlb_remove_tlb_entry() argument 44 if (pte_val(*ptep) & _PAGE_HASHPTE) in __tlb_remove_tlb_entry() 45 flush_hash_entry(tlb->mm, ptep, address); in __tlb_remove_tlb_entry()
|
D | pte-hash64-64k.h | 50 static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) in __real_pte() argument 62 rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); in __real_pte()
|
D | kvm_book3s_64.h | 300 static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing) in kvmppc_read_update_linux_pte() argument 308 old_pte = READ_ONCE(*ptep); in kvmppc_read_update_linux_pte() 324 if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep, in kvmppc_read_update_linux_pte()
|
D | mmu-hash64.h | 324 unsigned long vsid, pte_t *ptep, unsigned long trap, 327 unsigned long vsid, pte_t *ptep, unsigned long trap, 337 pte_t *ptep, unsigned long trap, unsigned long flags,
|
/linux-4.1.27/arch/x86/xen/ |
D | p2m.c | 197 pte_t *ptep; in xen_build_mfn_list_list() local 225 ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), in xen_build_mfn_list_list() 227 BUG_ON(!ptep || level != PG_LEVEL_4K); in xen_build_mfn_list_list() 228 mfn = pte_mfn(*ptep); in xen_build_mfn_list_list() 229 ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); in xen_build_mfn_list_list() 235 if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) { in xen_build_mfn_list_list() 312 pte_t *ptep; in xen_rebuild_p2m_list() local 364 ptep = populate_extra_pte((unsigned long)(p2m + pfn)); in xen_rebuild_p2m_list() 365 set_pte(ptep, in xen_rebuild_p2m_list() 374 ptep = populate_extra_pte((unsigned long)(p2m + pfn)); in xen_rebuild_p2m_list() [all …]
|
D | mmu.c | 196 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) in xen_set_domain_pte() argument 201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid); in xen_set_domain_pte() 207 u->ptr = virt_to_machine(ptep).maddr; in xen_set_domain_pte() 293 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) in xen_batched_set_pte() argument 302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; in xen_batched_set_pte() 311 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) in __xen_set_pte() argument 313 if (!xen_batched_set_pte(ptep, pteval)) { in __xen_set_pte() 323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; in __xen_set_pte() 329 static void xen_set_pte(pte_t *ptep, pte_t pteval) in xen_set_pte() argument 331 trace_xen_mmu_set_pte(ptep, pteval); in xen_set_pte() [all …]
|
D | mmu.h | 18 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); 20 pte_t *ptep, pte_t pte);
|
D | enlighten.c | 486 pte_t *ptep; in set_aliased_prot() local 492 ptep = lookup_address((unsigned long)v, &level); in set_aliased_prot() 493 BUG_ON(ptep == NULL); in set_aliased_prot() 495 pfn = pte_pfn(*ptep); in set_aliased_prot() 605 pte_t *ptep; in xen_load_gdt() local 616 ptep = lookup_address(va, &level); in xen_load_gdt() 617 BUG_ON(ptep == NULL); in xen_load_gdt() 619 pfn = pte_pfn(*ptep); in xen_load_gdt()
|
/linux-4.1.27/include/asm-generic/ |
D | pgtable.h |
     28  unsigned long address, pte_t *ptep,
     41  pte_t *ptep)    in ptep_test_and_clear_young()  argument
     43  pte_t pte = *ptep;    in ptep_test_and_clear_young()
     48  set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));    in ptep_test_and_clear_young()
     80  unsigned long address, pte_t *ptep);
     91  pte_t *ptep)    in ptep_get_and_clear()  argument
     93  pte_t pte = *ptep;    in ptep_get_and_clear()
     94  pte_clear(mm, address, ptep);    in ptep_get_and_clear()
    125  unsigned long address, pte_t *ptep,    in ptep_get_and_clear_full()  argument
    129  pte = ptep_get_and_clear(mm, address, ptep);    in ptep_get_and_clear_full()
    [all …]
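Pieced together from the fragments above, the generic fallbacks follow a plain read-test-write pattern; architectures that need atomicity or TLB maintenance override them. A sketch close to, but not guaranteed to be, the verbatim header (the __HAVE_ARCH_* guard names are from memory):

    #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
    static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                                unsigned long address,
                                                pte_t *ptep)
    {
        pte_t pte = *ptep;
        int r = 1;
        if (!pte_young(pte))
            r = 0;
        else
            set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
        return r;
    }
    #endif

    #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
    static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                           unsigned long address, pte_t *ptep)
    {
        pte_t pte = *ptep;
        pte_clear(mm, address, ptep);
        return pte;
    }
    #endif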
|
D | tlb.h |
    169  #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)    argument
    179  #define tlb_remove_tlb_entry(tlb, ptep, address) \    argument
    182  __tlb_remove_tlb_entry(tlb, ptep, address); \
    199  #define pte_free_tlb(tlb, ptep, address) \    argument
    202  __pte_free_tlb(tlb, ptep, address); \
|
D | hugetlb.h |
     35  pte_t *ptep)    in huge_pte_clear()  argument
     37  pte_clear(mm, addr, ptep);    in huge_pte_clear()
|
/linux-4.1.27/include/trace/events/ |
D | xen.h | 127 TP_PROTO(pte_t *ptep, pte_t pteval), 128 TP_ARGS(ptep, pteval), 130 __field(pte_t *, ptep) 133 TP_fast_assign(__entry->ptep = ptep; 136 __entry->ptep, 143 TP_PROTO(pte_t *ptep, pte_t pteval), \ 144 TP_ARGS(ptep, pteval)) 150 TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid), 151 TP_ARGS(ptep, pteval, domid), 153 __field(pte_t *, ptep) [all …]
|
/linux-4.1.27/arch/xtensa/include/asm/ |
D | pgtable.h | 248 #define pte_clear(mm,addr,ptep) \ argument 249 do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0) 299 static inline void update_pte(pte_t *ptep, pte_t pteval) in update_pte() argument 301 *ptep = pteval; in update_pte() 303 __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep)); in update_pte() 311 set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) in set_pte_at() argument 313 update_pte(ptep, pteval); in set_pte_at() 316 static inline void set_pte(pte_t *ptep, pte_t pteval) in set_pte() argument 318 update_pte(ptep, pteval); in set_pte() 331 pte_t *ptep) in ptep_test_and_clear_young() argument [all …]
|
D | pgalloc.h | 24 #define pmd_populate_kernel(mm, pmdp, ptep) \ argument 25 (pmd_val(*(pmdp)) = ((unsigned long)ptep)) 44 pte_t *ptep; in pte_alloc_one_kernel() local 47 ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); in pte_alloc_one_kernel() 48 if (!ptep) in pte_alloc_one_kernel() 51 pte_clear(NULL, 0, ptep + i); in pte_alloc_one_kernel() 52 return ptep; in pte_alloc_one_kernel()
|
/linux-4.1.27/drivers/iommu/ |
D | io-pgtable-arm.c | 205 arm_lpae_iopte *ptep); 210 arm_lpae_iopte *ptep) in arm_lpae_init_pte() argument 214 if (iopte_leaf(*ptep, lvl)) { in arm_lpae_init_pte() 218 } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { in arm_lpae_init_pte() 226 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); in arm_lpae_init_pte() 242 *ptep = pte; in arm_lpae_init_pte() 243 data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); in arm_lpae_init_pte() 249 int lvl, arm_lpae_iopte *ptep) in __arm_lpae_map() argument 256 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); in __arm_lpae_map() 260 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); in __arm_lpae_map() [all …]
|
/linux-4.1.27/arch/sh/mm/ |
D | gup.c | 17 static inline pte_t gup_get_pte(pte_t *ptep) in gup_get_pte() argument 20 return READ_ONCE(*ptep); in gup_get_pte() 58 pte.pte_low = ptep->pte_low; in gup_get_pte() 60 pte.pte_high = ptep->pte_high; in gup_get_pte() 62 if (unlikely(pte.pte_low != ptep->pte_low)) in gup_get_pte() 78 pte_t *ptep; in gup_pte_range() local 96 ptep = pte_offset_map(&pmd, addr); in gup_pte_range() 98 pte_t pte = gup_get_pte(ptep); in gup_pte_range() 102 pte_unmap(ptep); in gup_pte_range() 113 } while (ptep++, addr += PAGE_SIZE, addr != end); in gup_pte_range() [all …]
|
D | hugetlbpage.c | 65 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|
/linux-4.1.27/arch/microblaze/mm/ |
D | consistent.c | 169 pte_t *ptep = consistent_virt_to_pte(vaddr); in consistent_virt_to_pfn() local 171 if (pte_none(*ptep) || !pte_present(*ptep)) in consistent_virt_to_pfn() 174 return pte_pfn(*ptep); in consistent_virt_to_pfn() 203 pte_t *ptep = consistent_virt_to_pte(vaddr); in consistent_free() local 206 if (!pte_none(*ptep) && pte_present(*ptep)) { in consistent_free() 207 pfn = pte_pfn(*ptep); in consistent_free() 208 pte_clear(&init_mm, (unsigned int)vaddr, ptep); in consistent_free()
|
D | pgtable.c | 191 static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep) in get_pteptr() argument 205 *ptep = pte; in get_pteptr()
|
/linux-4.1.27/arch/sparc/mm/ |
D | hugetlbpage.c | 175 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument 181 pte_t *ptep, pte_t entry) in set_huge_pte_at() argument 185 if (!pte_present(*ptep) && pte_present(entry)) in set_huge_pte_at() 190 set_pte_at(mm, addr, ptep, entry); in set_huge_pte_at() 191 ptep++; in set_huge_pte_at() 198 pte_t *ptep) in huge_ptep_get_and_clear() argument 203 entry = *ptep; in huge_ptep_get_and_clear() 210 pte_clear(mm, addr, ptep); in huge_ptep_get_and_clear() 212 ptep++; in huge_ptep_get_and_clear()
|
D | srmmu.c | 112 void pmd_set(pmd_t *pmdp, pte_t *ptep) in pmd_set() argument 117 ptp = __nocache_pa((unsigned long) ptep) >> 4; in pmd_set() 124 void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep) in pmd_populate() argument 129 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */ in pmd_populate() 487 pte_t *ptep; in srmmu_mapioaddr() local 493 ptep = pte_offset_kernel(pmdp, virt_addr); in srmmu_mapioaddr() 503 set_pte(ptep, __pte(tmp)); in srmmu_mapioaddr() 522 pte_t *ptep; in srmmu_unmapioaddr() local 526 ptep = pte_offset_kernel(pmdp, virt_addr); in srmmu_unmapioaddr() 529 __pte_clear(ptep); in srmmu_unmapioaddr() [all …]
|
D | gup.c | 24 pte_t *ptep; in gup_pte_range() local 37 ptep = pte_offset_kernel(&pmd, addr); in gup_pte_range() 40 pte_t pte = *ptep; in gup_pte_range() 55 if (unlikely(pte_val(pte) != pte_val(*ptep))) { in gup_pte_range() 64 } while (ptep++, addr += PAGE_SIZE, addr != end); in gup_pte_range()
|
D | fault_64.c | 97 pte_t *ptep, pte; in get_user_insn() local 129 ptep = pte_offset_map(pmdp, tpc); in get_user_insn() 130 pte = *ptep; in get_user_insn() 140 pte_unmap(ptep); in get_user_insn()
|
D | io-unit.c | 219 pte_t *ptep; in iounit_map_dma_area() local 224 ptep = pte_offset_map(pmdp, addr); in iounit_map_dma_area() 226 set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); in iounit_map_dma_area()
|
D | iommu.c | 351 pte_t *ptep; in iommu_map_dma_area() local 362 ptep = pte_offset_map(pmdp, addr); in iommu_map_dma_area() 364 set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); in iommu_map_dma_area()
|
D | tlb.c | 105 pte_t *ptep, pte_t orig, int fullmm) in tlb_batch_add() argument
|
/linux-4.1.27/arch/mips/mm/ |
D | tlb-r4k.c | 291 pte_t *ptep; in __update_tlb() local 318 ptep = (pte_t *)pmdp; in __update_tlb() 319 lo = pte_to_entrylo(pte_val(*ptep)); in __update_tlb() 333 ptep = pte_offset_map(pmdp, address); in __update_tlb() 337 write_c0_entrylo0(pte_to_entrylo(ptep->pte_high)); in __update_tlb() 338 writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK); in __update_tlb() 339 ptep++; in __update_tlb() 340 write_c0_entrylo1(pte_to_entrylo(ptep->pte_high)); in __update_tlb() 341 writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK); in __update_tlb() 343 write_c0_entrylo0(ptep->pte_high); in __update_tlb() [all …]
|
D | gup.c | 18 static inline pte_t gup_get_pte(pte_t *ptep) in gup_get_pte() argument 24 pte.pte_low = ptep->pte_low; in gup_get_pte() 26 pte.pte_high = ptep->pte_high; in gup_get_pte() 28 if (unlikely(pte.pte_low != ptep->pte_low)) in gup_get_pte() 33 return READ_ONCE(*ptep); in gup_get_pte() 40 pte_t *ptep = pte_offset_map(&pmd, addr); in gup_pte_range() local 42 pte_t pte = gup_get_pte(ptep); in gup_pte_range() 47 pte_unmap(ptep); in gup_pte_range() 57 } while (ptep++, addr += PAGE_SIZE, addr != end); in gup_pte_range() 59 pte_unmap(ptep - 1); in gup_pte_range()
|
D | tlbex.c | 1002 static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) in build_update_entries() argument 1010 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ in build_update_entries() 1011 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ in build_update_entries() 1015 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); in build_update_entries() 1019 uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */ in build_update_entries() 1021 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ in build_update_entries() 1028 uasm_i_addu(p, scratch, 0, ptep); in build_update_entries() 1030 uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */ in build_update_entries() 1031 uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */ in build_update_entries() 1033 UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); in build_update_entries() [all …]
|
D | tlb-r8k.c | 188 pte_t *ptep; in __update_tlb() local 205 ptep = pte_offset_map(pmdp, address); in __update_tlb() 208 write_c0_entrylo(pte_val(*ptep++) >> 6); in __update_tlb()
|
D | c-tx39.c | 176 pte_t *ptep; in tx39_flush_cache_page() local 189 ptep = pte_offset(pmdp, page); in tx39_flush_cache_page() 195 if (!(pte_val(*ptep) & _PAGE_PRESENT)) in tx39_flush_cache_page() 204 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { in tx39_flush_cache_page()
|
D | c-r3k.c | 245 pte_t *ptep; in r3k_flush_cache_page() local 257 ptep = pte_offset(pmdp, addr); in r3k_flush_cache_page() 260 if (!(pte_val(*ptep) & _PAGE_PRESENT)) in r3k_flush_cache_page()
|
D | hugetlbpage.c | 54 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|
D | c-r4k.c | 559 pte_t *ptep; in local_r4k_flush_cache_page() local 573 ptep = pte_offset(pmdp, addr); in local_r4k_flush_cache_page() 579 if (!(pte_present(*ptep))) in local_r4k_flush_cache_page() 582 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) in local_r4k_flush_cache_page()
|
/linux-4.1.27/arch/x86/mm/ |
D | gup.c |
     15  static inline pte_t gup_get_pte(pte_t *ptep)    in gup_get_pte()  argument
     18  return READ_ONCE(*ptep);    in gup_get_pte()
     55  pte.pte_low = ptep->pte_low;    in gup_get_pte()
     57  pte.pte_high = ptep->pte_high;    in gup_get_pte()
     59  if (unlikely(pte.pte_low != ptep->pte_low))    in gup_get_pte()
     75  pte_t *ptep;    in gup_pte_range()  local
     81  ptep = pte_offset_map(&pmd, addr);    in gup_pte_range()
     83  pte_t pte = gup_get_pte(ptep);    in gup_pte_range()
     88  pte_unmap(ptep);    in gup_pte_range()
     93  pte_unmap(ptep);    in gup_pte_range()
    [all …]
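Lines 15-18 and 55-59 above are the two configurations of one helper: without PAE a single READ_ONCE() is enough, while with PAE the two 32-bit halves must be re-read until they agree. A hedged reconstruction of the shape of that helper (the barriers and retry label are assumptions consistent with the quoted fragments):

    static inline pte_t gup_get_pte(pte_t *ptep)
    {
    #ifndef CONFIG_X86_PAE
        return READ_ONCE(*ptep);                 /* one naturally atomic load */
    #else
        /* PAE: a pte is two 32-bit words, so read until both halves match */
        pte_t pte;

    retry:
        pte.pte_low = ptep->pte_low;
        smp_rmb();
        pte.pte_high = ptep->pte_high;
        smp_rmb();
        if (unlikely(pte.pte_low != ptep->pte_low))
            goto retry;

        return pte;
    #endif
    }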
|
D | pgtable.c | 410 unsigned long address, pte_t *ptep, in ptep_set_access_flags() argument 413 int changed = !pte_same(*ptep, entry); in ptep_set_access_flags() 416 *ptep = entry; in ptep_set_access_flags() 417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags() 448 unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 452 if (pte_young(*ptep)) in ptep_test_and_clear_young() 454 (unsigned long *) &ptep->pte); in ptep_test_and_clear_young() 457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young() 480 unsigned long address, pte_t *ptep) in ptep_clear_flush_young() argument 495 return ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
|
D | ioremap.c | 397 bool __init is_early_ioremap_ptep(pte_t *ptep) in is_early_ioremap_ptep() argument 399 return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; in is_early_ioremap_ptep()
|
/linux-4.1.27/arch/arm/mm/ |
D | fault-armv.c | 41 unsigned long pfn, pte_t *ptep) in do_adjust_pte() argument 43 pte_t entry = *ptep; in do_adjust_pte() 61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte() 133 unsigned long addr, pte_t *ptep, unsigned long pfn) in make_coherent() argument 164 do_adjust_pte(vma, addr, pfn, ptep); in make_coherent() 181 pte_t *ptep) in update_mmu_cache() argument 183 unsigned long pfn = pte_pfn(*ptep); in update_mmu_cache() 203 make_coherent(mapping, vma, addr, ptep, pfn); in update_mmu_cache()
|
D | mm.h | 25 pte_t *ptep = pte_offset_kernel(top_pmd, va); in set_top_pte() local 26 set_pte_ext(ptep, pte, 0); in set_top_pte() 32 pte_t *ptep = pte_offset_kernel(top_pmd, va); in get_top_pte() local 33 return *ptep; in get_top_pte()
|
D | highmem.c | 24 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); in set_fixmap_pte() local 26 set_pte_ext(ptep, pte, 0); in set_fixmap_pte() 32 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); in get_fixmap_pte() local 34 return *ptep; in get_fixmap_pte()
|
D | pageattr.c | 24 static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, in change_page_range() argument 28 pte_t pte = *ptep; in change_page_range() 33 set_pte_ext(ptep, pte, 0); in change_page_range()
|
D | hugetlbpage.c | 44 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|
/linux-4.1.27/arch/tile/mm/ |
D | highmem.c | 29 pte_t *ptep; in kmap() local 40 ptep = kmap_get_pte((unsigned long)kva); in kmap() 42 set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); in kmap() 97 unsigned long va, pte_t *ptep, pte_t pteval) in kmap_atomic_register() argument 116 set_pte(ptep, pteval); in kmap_atomic_register() 148 pte_t *ptep = kmap_get_pte(amp->va); in kmap_atomic_fix_one_kpte() local 150 set_pte(ptep, pte_mkmigrate(*ptep)); in kmap_atomic_fix_one_kpte() 160 set_pte(ptep, pte); in kmap_atomic_fix_one_kpte()
|
D | pgtable.c | 299 unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 304 u8 *tmp = (u8 *)ptep; in ptep_test_and_clear_young() 318 unsigned long addr, pte_t *ptep) in ptep_set_wrprotect() argument 323 u32 *tmp = (u32 *)ptep; in ptep_set_wrprotect() 409 void __set_pte(pte_t *ptep, pte_t pte) in __set_pte() argument 412 *ptep = pte; in __set_pte() 418 ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32); in __set_pte() 420 ((u32 *)ptep)[0] = (u32)(pte_val(pte)); in __set_pte() 422 ((u32 *)ptep)[0] = (u32)(pte_val(pte)); in __set_pte() 424 ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32); in __set_pte() [all …]
|
D | homecache.c | 178 pte_t *ptep; in homecache_finv_map_page() local 190 ptep = virt_to_kpte(va); in homecache_finv_map_page() 192 __set_pte(ptep, pte_set_home(pte, home)); in homecache_finv_map_page() 194 __pte_clear(ptep); in homecache_finv_map_page() 376 pte_t *ptep = virt_to_kpte(kva); in homecache_change_page_home() local 377 pte_t pteval = *ptep; in homecache_change_page_home() 379 __set_pte(ptep, pte_set_home(pteval, home)); in homecache_change_page_home()
|
D | hugetlbpage.c | 91 pte_t *ptep = base + index; in get_pte() local 93 if (!pte_present(*ptep) && huge_shift[level] != 0) { in get_pte() 98 ptep = super_ptep; in get_pte() 101 return ptep; in get_pte() 163 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|
D | init.c | 937 pte_t *ptep = virt_to_kpte(addr); in free_init_pages() local 945 pte_clear(&init_mm, addr, ptep); in free_init_pages() 948 if (pte_huge(*ptep)) in free_init_pages() 951 set_pte_at(&init_mm, addr, ptep, in free_init_pages()
|
/linux-4.1.27/arch/m68k/include/asm/ |
D | motorola_pgtable.h | 108 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) in pmd_set() argument 110 unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED; in pmd_set() 131 #define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) argument 242 pte_t *ptep; in nocache_page() local 246 ptep = pte_offset_kernel(pmdp, addr); in nocache_page() 247 *ptep = pte_mknocache(*ptep); in nocache_page() 258 pte_t *ptep; in cache_page() local 262 ptep = pte_offset_kernel(pmdp, addr); in cache_page() 263 *ptep = pte_mkcache(*ptep); in cache_page()
|
D | mcf_pgtable.h | 164 #define pmd_set(pmdp, ptep) do {} while (0) argument 185 pte_t *ptep) in pte_clear() argument 187 pte_val(*ptep) = 0; in pte_clear() 363 pte_t *ptep; in nocache_page() local 368 ptep = pte_offset_kernel(pmdp, addr); in nocache_page() 369 *ptep = pte_mknocache(*ptep); in nocache_page() 379 pte_t *ptep; in cache_page() local 384 ptep = pte_offset_kernel(pmdp, addr); in cache_page() 385 *ptep = pte_mkcache(*ptep); in cache_page()
|
D | pgtable_mm.h | 28 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 132 unsigned long address, pte_t *ptep) in update_mmu_cache() argument
|
D | sun3_pgtable.h | 110 #define pmd_set(pmdp,ptep) do {} while (0) argument 124 static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *ptep) in pte_clear() argument 126 pte_val (*ptep) = 0; in pte_clear()
|
D | tlb.h | 10 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | pgtable.h | 302 #define set_pte_at(mm, addr, ptep, pteval) set_pte((ptep), (pteval)) argument 311 #define ptep_get_and_clear(mm, addr, ptep) \ argument 312 __pte(xchg(&(ptep)->pte, 0)) 347 pte_t *ptep) in ptep_test_and_clear_dirty() argument 349 if (!pte_dirty(*ptep)) in ptep_test_and_clear_dirty() 351 return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte); in ptep_test_and_clear_dirty() 356 pte_t *ptep) in ptep_test_and_clear_young() argument 358 if (!pte_young(*ptep)) in ptep_test_and_clear_young() 360 return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte); in ptep_test_and_clear_young() 364 void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_wrprotect() argument [all …]
|
D | tlb.h | 24 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/microblaze/include/asm/ |
D | pgtable.h | 298 #define pte_clear(mm, addr, ptep) \ argument 299 do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) 421 pte_t *ptep, pte_t pte) in set_pte() argument 423 *ptep = pte; in set_pte() 427 pte_t *ptep, pte_t pte) in set_pte_at() argument 429 *ptep = pte; in set_pte_at() 434 unsigned long address, pte_t *ptep) in ptep_test_and_clear_young() argument 436 return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0; in ptep_test_and_clear_young() 440 unsigned long addr, pte_t *ptep) in ptep_test_and_clear_dirty() argument 442 return (pte_update(ptep, \ in ptep_test_and_clear_dirty() [all …]
|
D | tlbflush.h | 42 #define update_mmu_cache(vma, addr, ptep) do { } while (0) argument
|
/linux-4.1.27/arch/powerpc/mm/ |
D | dma-noncoherent.c | 279 pte_t *ptep; in __dma_free_coherent() local 282 ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr), in __dma_free_coherent() 286 if (!pte_none(*ptep) && pte_present(*ptep)) { in __dma_free_coherent() 287 pfn = pte_pfn(*ptep); in __dma_free_coherent() 288 pte_clear(&init_mm, addr, ptep); in __dma_free_coherent() 415 pte_t *ptep = pte_offset_kernel(pmd, cpu_addr); in __dma_get_coherent_pfn() local 417 if (pte_none(*ptep) || !pte_present(*ptep)) in __dma_get_coherent_pfn() 419 return pte_pfn(*ptep); in __dma_get_coherent_pfn()
|
D | tlb_hash64.c | 44 pte_t *ptep, unsigned long pte, int huge) in hpte_need_flush() argument 92 rpte = __real_pte(__pte(pte), ptep); in hpte_need_flush() 211 pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, in __flush_hash_table_range() local 215 if (ptep == NULL) in __flush_hash_table_range() 217 pte = pte_val(*ptep); in __flush_hash_table_range() 223 hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); in __flush_hash_table_range() 225 hpte_need_flush(mm, start, ptep, pte, 0); in __flush_hash_table_range()
|
D | hugetlbpage-hash64.c | 22 pte_t *ptep, unsigned long trap, unsigned long flags, in __hash_page_huge() argument 48 old_pte = pte_val(*ptep); in __hash_page_huge() 60 } while(old_pte != __cmpxchg_u64((unsigned long *)ptep, in __hash_page_huge() 115 *ptep = __pte(old_pte); in __hash_page_huge() 127 *ptep = __pte(new_pte & ~_PAGE_BUSY); in __hash_page_huge()
|
D | pgtable.c | 172 void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, in set_pte_at() argument 180 VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) == in set_pte_at() 190 __set_pte_at(mm, addr, ptep, pte, 0); in set_pte_at() 201 pte_t *ptep, pte_t entry, int dirty) in ptep_set_access_flags() argument 205 changed = !pte_same(*(ptep), entry); in ptep_set_access_flags() 209 __ptep_set_access_flags(ptep, entry); in ptep_set_access_flags()
|
D | hash_utils_64.c | 999 pte_t *ptep; in hash_page_mm() local 1069 ptep = __find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); in hash_page_mm() 1070 if (ptep == NULL || !pte_present(*ptep)) { in hash_page_mm() 1082 if (access & ~pte_val(*ptep)) { in hash_page_mm() 1089 if (pmd_trans_huge(*(pmd_t *)ptep)) in hash_page_mm() 1090 rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, in hash_page_mm() 1094 rc = __hash_page_huge(ea, access, vsid, ptep, trap, in hash_page_mm() 1113 DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep)); in hash_page_mm() 1115 DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep), in hash_page_mm() 1116 pte_val(*(ptep + PTRS_PER_PTE))); in hash_page_mm() [all …]
|
D | hugetlbpage.c | 442 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument 692 pte_t *ptep, pte; in follow_huge_addr() local 698 ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift); in follow_huge_addr() 699 if (!ptep) in follow_huge_addr() 701 pte = READ_ONCE(*ptep); in follow_huge_addr() 750 pte_t *ptep; in gup_huge_pd() local 754 ptep = hugepte_offset(hugepd, addr, pdshift); in gup_huge_pd() 757 if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr)) in gup_huge_pd() 759 } while (ptep++, addr = next, addr != end); in gup_huge_pd() 1052 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, in gup_hugepte() argument [all …]
|
D | pgtable_64.c | 96 pte_t *ptep; in map_kernel_page() local 106 ptep = pte_alloc_kernel(pmdp, ea); in map_kernel_page() 107 if (!ptep) in map_kernel_page() 109 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, in map_kernel_page() 129 ptep = early_alloc_pgtable(PAGE_SIZE); in map_kernel_page() 130 BUG_ON(ptep == NULL); in map_kernel_page() 131 pmd_populate_kernel(&init_mm, pmdp, ptep); in map_kernel_page() 133 ptep = pte_offset_kernel(pmdp, ea); in map_kernel_page() 134 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, in map_kernel_page()
|
D | tlb_hash32.c | 40 void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) in flush_hash_entry() argument 45 ptephys = __pa(ptep) & PAGE_MASK; in flush_hash_entry()
|
D | mem.c | 488 pte_t *ptep) in update_mmu_cache() argument 498 if (!pte_young(*ptep) || address >= TASK_SIZE) in update_mmu_cache() 520 book3e_hugetlb_preload(vma, address, *ptep); in update_mmu_cache()
|
D | pgtable_32.c | 367 get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp) in get_pteptr() argument 384 *ptep = pte; in get_pteptr()
|
/linux-4.1.27/mm/ |
D | pgtable-generic.c |
     48  unsigned long address, pte_t *ptep,    in ptep_set_access_flags()  argument
     51  int changed = !pte_same(*ptep, entry);    in ptep_set_access_flags()
     53  set_pte_at(vma->vm_mm, address, ptep, entry);    in ptep_set_access_flags()
     82  unsigned long address, pte_t *ptep)    in ptep_clear_flush_young()  argument
     85  young = ptep_test_and_clear_young(vma, address, ptep);    in ptep_clear_flush_young()
    111  pte_t *ptep)    in ptep_clear_flush()  argument
    115  pte = ptep_get_and_clear(mm, address, ptep);    in ptep_clear_flush()
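The fragments at lines 111-115 show how the generic ptep_clear_flush() composes the primitives listed elsewhere in this index. A sketch, assuming the usual pte_accessible() guard around the TLB flush:

    pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                           pte_t *ptep)
    {
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;

        pte = ptep_get_and_clear(mm, address, ptep);  /* line 115 above */
        if (pte_accessible(mm, pte))                  /* assumed guard: only flush live entries */
            flush_tlb_page(vma, address);
        return pte;
    }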
|
D | hugetlb.c | 2623 unsigned long address, pte_t *ptep) in set_huge_ptep_writable() argument 2627 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); in set_huge_ptep_writable() 2628 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable() 2629 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable() 2742 pte_t *ptep; in __unmap_hugepage_range() local 2760 ptep = huge_pte_offset(mm, address); in __unmap_hugepage_range() 2761 if (!ptep) in __unmap_hugepage_range() 2764 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range() 2765 if (huge_pmd_unshare(mm, &address, ptep)) in __unmap_hugepage_range() 2768 pte = huge_ptep_get(ptep); in __unmap_hugepage_range() [all …]
|
D | mincore.c | 116 pte_t *ptep; in mincore_pte_range() local 131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range() 132 for (; addr != end; ptep++, addr += PAGE_SIZE) { in mincore_pte_range() 133 pte_t pte = *ptep; in mincore_pte_range() 161 pte_unmap_unlock(ptep - 1, ptl); in mincore_pte_range()
|
D | gup.c | 41 pte_t *ptep, pte; in follow_page_pte() local 47 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte() 48 pte = *ptep; in follow_page_pte() 63 pte_unmap_unlock(ptep, ptl); in follow_page_pte() 70 pte_unmap_unlock(ptep, ptl); in follow_page_pte() 117 pte_unmap_unlock(ptep, ptl); in follow_page_pte() 120 pte_unmap_unlock(ptep, ptl); in follow_page_pte() 124 pte_unmap_unlock(ptep, ptl); in follow_page_pte() 1010 pte_t *ptep, *ptem; in gup_pte_range() local 1013 ptem = ptep = pte_offset_map(&pmd, addr); in gup_pte_range() [all …]
|
D | migrate.c | 111 pte_t *ptep, pte; in remove_migration_pte() local 115 ptep = huge_pte_offset(mm, addr); in remove_migration_pte() 116 if (!ptep) in remove_migration_pte() 118 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); in remove_migration_pte() 124 ptep = pte_offset_map(pmd, addr); in remove_migration_pte() 135 pte = *ptep; in remove_migration_pte() 147 if (pte_swp_soft_dirty(*ptep)) in remove_migration_pte() 161 set_pte_at(mm, addr, ptep, pte); in remove_migration_pte() 174 update_mmu_cache(vma, addr, ptep); in remove_migration_pte() 176 pte_unmap_unlock(ptep, ptl); in remove_migration_pte() [all …]
|
D | ksm.c | 860 pte_t *ptep; in write_protect_page() local 877 ptep = page_check_address(page, mm, addr, &ptl, 0); in write_protect_page() 878 if (!ptep) in write_protect_page() 881 if (pte_write(*ptep) || pte_dirty(*ptep)) { in write_protect_page() 895 entry = ptep_clear_flush_notify(vma, addr, ptep); in write_protect_page() 901 set_pte_at(mm, addr, ptep, entry); in write_protect_page() 907 set_pte_at_notify(mm, addr, ptep, entry); in write_protect_page() 909 *orig_pte = *ptep; in write_protect_page() 913 pte_unmap_unlock(ptep, ptl); in write_protect_page() 934 pte_t *ptep; in replace_page() local [all …]
|
D | memory.c | 3132 unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) in do_numa_page() argument 3157 if (unlikely(!pte_same(*ptep, pte))) { in do_numa_page() 3158 pte_unmap_unlock(ptep, ptl); in do_numa_page() 3167 set_pte_at(mm, addr, ptep, pte); in do_numa_page() 3168 update_mmu_cache(vma, addr, ptep); in do_numa_page() 3172 pte_unmap_unlock(ptep, ptl); in do_numa_page() 3197 pte_unmap_unlock(ptep, ptl); in do_numa_page() 3495 pte_t *ptep; in __follow_pte() local 3514 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte() 3515 if (!ptep) in __follow_pte() [all …]
|
D | vmalloc.c | 249 pte_t *ptep, pte; in vmalloc_to_page() local 251 ptep = pte_offset_map(pmd, addr); in vmalloc_to_page() 252 pte = *ptep; in vmalloc_to_page() 255 pte_unmap(ptep); in vmalloc_to_page()
|
/linux-4.1.27/arch/arc/include/asm/ |
D | pgalloc.h | 45 pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep) in pmd_populate() argument 47 pmd_set(pmd, (pte_t *) ptep); in pmd_populate() 128 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep) in pte_free() argument 130 pgtable_page_dtor(virt_to_page(ptep)); in pte_free() 131 free_pages(ptep, __get_order_pte()); in pte_free()
|
D | pgtable.h | 245 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) in pmd_set() argument 247 pmd_val(*pmdp) = (unsigned long)ptep; in pmd_set() 252 #define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0)) argument 315 pte_t *ptep, pte_t pteval) in set_pte_at() argument 317 set_pte(ptep, pteval); in set_pte_at() 353 pte_t *ptep);
|
D | tlb.h | 42 #define __tlb_remove_tlb_entry(tlb, ptep, address) argument
|
/linux-4.1.27/arch/um/include/asm/ |
D | tlb.h | 25 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, in __tlb_remove_tlb_entry() argument 120 #define tlb_remove_tlb_entry(tlb, ptep, address) \ argument 123 __tlb_remove_tlb_entry(tlb, ptep, address); \ 126 #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) argument
|
D | pgtable.h | 259 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 339 #define update_mmu_cache(vma,address,ptep) do ; while (0) argument 356 #define kpte_clear_flush(ptep, vaddr) \ argument 358 pte_clear(&init_mm, (vaddr), (ptep)); \
|
/linux-4.1.27/arch/ia64/mm/ |
D | hugetlbpage.c | 68 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument 95 pte_t *ptep; in follow_huge_addr() local 100 ptep = huge_pte_offset(mm, addr); in follow_huge_addr() 101 if (!ptep || pte_none(*ptep)) in follow_huge_addr() 103 page = pte_page(*ptep); in follow_huge_addr()
|
D | fault.c | 53 pte_t *ptep, pte; in mapped_kernel_page_is_present() local 67 ptep = pte_offset_kernel(pmd, address); in mapped_kernel_page_is_present() 68 if (!ptep) in mapped_kernel_page_is_present() 71 pte = *ptep; in mapped_kernel_page_is_present()
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | pgtable.h | 62 #define set_pte_at(mm, addr, ptep, pteval) \ argument 67 old_pte = *ptep; \ 68 set_pte(ptep, pteval); \ 448 …c inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 453 if (!pte_young(*ptep)) in ptep_test_and_clear_young() 457 pte = *ptep; in ptep_test_and_clear_young() 462 set_pte(ptep, pte_mkold(pte)); in ptep_test_and_clear_young() 469 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_get_and_clear() argument 475 old_pte = *ptep; in ptep_get_and_clear() 476 set_pte(ptep, __pte(0)); in ptep_get_and_clear() [all …]
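The parisc ptep_test_and_clear_young()/ptep_get_and_clear() definitions above are the accessors that reclaim and unmap code build on. As a small usage sketch, this is roughly how a caller harvests the accessed bit (compare page_referenced_one(), which uses the flushing and notifier variants); pte_was_referenced() is a hypothetical name and locking is assumed to be the caller's responsibility:

#include <linux/mm.h>
#include <asm/tlbflush.h>

static int pte_was_referenced(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int young = ptep_test_and_clear_young(vma, addr, ptep);

        if (young)
                flush_tlb_page(vma, addr);      /* the TLB may still cache the old "young" copy */
        return young;
}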
|
/linux-4.1.27/arch/tile/kernel/ |
D | machine_kexec.c | 255 pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr); in setup_quasi_va_is_pa() local 259 __set_pte(ptep, pfn_pte(pfn, pte)); in setup_quasi_va_is_pa() 267 pte_t *ptep; in machine_kexec() local 285 ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer); in machine_kexec() 286 __set_pte(ptep, pte_mkexec(*ptep)); in machine_kexec()
|
D | setup.c | 1599 pte_t *ptep = virt_to_kpte(addr); in setup_per_cpu_areas() local 1600 pte_t pte = *ptep; in setup_per_cpu_areas() 1604 set_pte_at(&init_mm, addr, ptep, pte); in setup_per_cpu_areas() 1608 ptep = virt_to_kpte(lowmem_va); in setup_per_cpu_areas() 1609 if (pte_huge(*ptep)) { in setup_per_cpu_areas() 1612 shatter_pmd((pmd_t *)ptep); in setup_per_cpu_areas() 1613 ptep = virt_to_kpte(lowmem_va); in setup_per_cpu_areas() 1614 BUG_ON(pte_huge(*ptep)); in setup_per_cpu_areas() 1616 BUG_ON(pfn != pte_pfn(*ptep)); in setup_per_cpu_areas() 1617 set_pte_at(&init_mm, lowmem_va, ptep, pte); in setup_per_cpu_areas()
|
/linux-4.1.27/arch/frv/include/asm/ |
D | pgtable.h | 177 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 388 …c inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 390 int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); in ptep_test_and_clear_young() 391 asm volatile("dcf %M0" :: "U"(*ptep)); in ptep_test_and_clear_young() 395 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_get_and_clear() argument 397 unsigned long x = xchg(&ptep->pte, 0); in ptep_get_and_clear() 398 asm volatile("dcf %M0" :: "U"(*ptep)); in ptep_get_and_clear() 402 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_wrprotect() argument 404 set_bit(_PAGE_BIT_WP, ptep); in ptep_set_wrprotect() 405 asm volatile("dcf %M0" :: "U"(*ptep)); in ptep_set_wrprotect() [all …]
|
D | tlb.h | 17 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/unicore32/include/asm/ |
D | pgalloc.h | 93 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) in pmd_populate_kernel() argument 95 unsigned long pte_ptr = (unsigned long)ptep; in pmd_populate_kernel() 105 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) in pmd_populate() argument 108 page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); in pmd_populate()
|
D | pgtable.h | 157 #define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0)) argument 166 #define set_pte(ptep, pte) cpu_set_pte(ptep, pte) argument 168 #define set_pte_at(mm, addr, ptep, pteval) \ argument 170 set_pte(ptep, pteval); \
|
D | tlb.h | 17 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
D | cpu-single.h | 39 extern void cpu_set_pte(pte_t *ptep, pte_t pte);
|
D | tlbflush.h | 188 unsigned long addr, pte_t *ptep);
|
/linux-4.1.27/arch/m68k/sun3/ |
D | dvma.c | 27 pte_t ptep; in dvma_page() local 32 ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL); in dvma_page() 33 pte = pte_val(ptep); in dvma_page()
|
/linux-4.1.27/arch/arc/mm/ |
D | tlb.c | 442 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in create_tlb() argument 483 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED); in create_tlb() 490 pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0); in create_tlb() 499 rwx = pte_val(*ptep) & PTE_BITS_RWX; in create_tlb() 501 if (pte_val(*ptep) & _PAGE_GLOBAL) in create_tlb() 506 pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1); in create_tlb() 523 pte_t *ptep) in update_mmu_cache() argument 526 unsigned long paddr = pte_val(*ptep) & PAGE_MASK; in update_mmu_cache() 527 struct page *page = pfn_to_page(pte_pfn(*ptep)); in update_mmu_cache() 529 create_tlb(vma, vaddr, ptep); in update_mmu_cache()
|
/linux-4.1.27/arch/parisc/kernel/ |
D | cache.c | 78 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in update_mmu_cache() argument 80 unsigned long pfn = pte_pfn(*ptep); in update_mmu_cache() 501 pte_t *ptep = NULL; in get_ptep() local 508 ptep = pte_offset_map(pmd, addr); in get_ptep() 511 return ptep; in get_ptep() 543 pte_t *ptep = get_ptep(pgd, addr); in flush_cache_mm() local 544 if (!ptep) in flush_cache_mm() 546 pfn = pte_pfn(*ptep); in flush_cache_mm() 595 pte_t *ptep = get_ptep(pgd, addr); in flush_cache_range() local 596 if (!ptep) in flush_cache_range() [all …]
|
/linux-4.1.27/arch/x86/lguest/ |
D | boot.c | 672 pte_t *ptep) in lguest_pte_update() argument 677 ptep->pte_low, ptep->pte_high); in lguest_pte_update() 679 lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); in lguest_pte_update() 685 pte_t *ptep, pte_t pteval) in lguest_set_pte_at() argument 687 native_set_pte(ptep, pteval); in lguest_set_pte_at() 688 lguest_pte_update(mm, addr, ptep); in lguest_set_pte_at() 736 static void lguest_set_pte(pte_t *ptep, pte_t pteval) in lguest_set_pte() argument 738 native_set_pte(ptep, pteval); in lguest_set_pte() 749 static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) in lguest_set_pte_atomic() argument 751 native_set_pte_atomic(ptep, pte); in lguest_set_pte_atomic() [all …]
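The lguest hooks above share one ordering rule: update the in-memory page table with the native helper first, then tell the host, so the host's shadow tables never get ahead of the guest's own. A sketch of that rule; guest_set_pte_at() and hypervisor_notify_pte() are hypothetical stand-ins for the real pv_mmu_ops entry and the LHCALL_SET_PTE hypercall:

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical stand-in for the hypercall that syncs the host's shadow tables. */
static void hypervisor_notify_pte(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep)
{
        /* real lguest code: lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ...) */
}

static void guest_set_pte_at(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep, pte_t pteval)
{
        native_set_pte(ptep, pteval);           /* guest-visible tables first */
        hypervisor_notify_pte(mm, addr, ptep);  /* then the host's shadow copy */
}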
|
/linux-4.1.27/arch/nios2/include/asm/ |
D | pgtable.h | 204 static inline void set_pte(pte_t *ptep, pte_t pteval) in set_pte() argument 206 *ptep = pteval; in set_pte() 210 pte_t *ptep, pte_t pteval) in set_pte_at() argument 215 set_pte(ptep, pteval); in set_pte_at() 227 unsigned long addr, pte_t *ptep) in pte_clear() argument 233 set_pte_at(mm, addr, ptep, null); in pte_clear()
|
D | tlb.h | 29 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | pgtable.h | 28 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 225 extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) in pmd_set() argument 226 { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } in pmd_set() 248 extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in pte_clear() argument 250 pte_val(*ptep) = 0; in pte_clear() 329 unsigned long address, pte_t *ptep) in update_mmu_cache() argument
|
/linux-4.1.27/arch/m32r/include/asm/ |
D | pgtable.h | 252 … inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) in ptep_test_and_clear_young() argument 254 return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); in ptep_test_and_clear_young() 257 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) in ptep_set_wrprotect() argument 259 clear_bit(_PAGE_BIT_WRITE, ptep); in ptep_set_wrprotect() 294 static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) in pmd_set() argument 296 pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK); in pmd_set()
|
D | pgtable-2level.h | 45 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument
|
/linux-4.1.27/arch/arm64/mm/ |
D | pageattr.c | 26 static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, in change_page_range() argument 30 pte_t pte = *ptep; in change_page_range() 35 set_pte(ptep, pte); in change_page_range()
|
D | hugetlbpage.c | 35 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|
/linux-4.1.27/arch/hexagon/include/asm/ |
D | pgtable.h | 193 static inline void set_pte(pte_t *ptep, pte_t pteval) in set_pte() argument 199 *ptep = pteval; in set_pte() 221 pte_t *ptep) in pte_clear() argument 223 pte_val(*ptep) = _NULL_PTE; in pte_clear() 421 #define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte) argument
|
D | tlb.h | 30 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
D | cacheflush.h | 87 unsigned long address, pte_t *ptep) in update_mmu_cache() argument
|
/linux-4.1.27/arch/m68k/mm/ |
D | sun3kmap.c | 34 pte_t ptep; in do_page_mapin() local 36 ptep = pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL); in do_page_mapin() 37 pte = pte_val(ptep); in do_page_mapin()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | io-workarounds.c | 69 pte_t *ptep; in iowa_mem_find_bus() local 78 ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, in iowa_mem_find_bus() 80 if (ptep == NULL) in iowa_mem_find_bus() 84 paddr = pte_pfn(*ptep) << PAGE_SHIFT; in iowa_mem_find_bus()
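iowa_mem_find_bus() and eeh_token_to_phys() above both use the powerpc-only __find_linux_pte_or_hugepte() walker to turn a kernel virtual address into a physical one. A hedged sketch of that translation; kern_vaddr_to_phys() is a hypothetical name, and the real callers only do this where the mapping is known to be stable (and handle the hugepage shift more carefully):

#include <linux/mm.h>
#include <asm/pgtable.h>        /* powerpc: __find_linux_pte_or_hugepte() */

static unsigned long kern_vaddr_to_phys(unsigned long vaddr)
{
        unsigned int shift = 0;
        pte_t *ptep;

        ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, &shift);
        if (!ptep)
                return 0;                       /* not mapped */
        if (!shift)
                shift = PAGE_SHIFT;             /* normal page size */
        return (pte_pfn(*ptep) << PAGE_SHIFT) | (vaddr & ((1UL << shift) - 1));
}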
|
D | eeh.c | 347 pte_t *ptep; in eeh_token_to_phys() local 356 ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift); in eeh_token_to_phys() 357 if (!ptep) in eeh_token_to_phys() 360 pa = pte_pfn(*ptep) << PAGE_SHIFT; in eeh_token_to_phys()
|
/linux-4.1.27/arch/score/mm/ |
D | cache.c | 180 pte_t *ptep; in flush_cache_range() local 188 ptep = pte_offset(pmdp, start); in flush_cache_range() 195 ptep = pte_offset(pmdp, start); in flush_cache_range() 197 if (!(pte_val(*ptep) & _PAGE_PRESENT)) { in flush_cache_range()
|
/linux-4.1.27/arch/cris/include/asm/ |
D | pgtable.h | 37 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) argument 225 static inline void pmd_set(pmd_t * pmdp, pte_t * ptep) in pmd_set() argument 226 { pmd_val(*pmdp) = _PAGE_TABLE | (unsigned long) ptep; } in pmd_set() 270 unsigned long address, pte_t *ptep) in update_mmu_cache() argument
|
D | tlb.h | 14 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/avr32/include/asm/ |
D | pgtable-2level.h | 33 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep, pteval) argument
|
D | pgtable.h | 314 unsigned long address, pte_t *ptep);
|
/linux-4.1.27/arch/mn10300/mm/ |
D | mmu-context.c | 29 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) in update_mmu_cache() argument 32 pte_t pte = *ptep; in update_mmu_cache()
|
/linux-4.1.27/arch/microblaze/kernel/ |
D | signal.c | 164 pte_t *ptep; in setup_rt_frame() local 203 ptep = pte_offset_map(pmdp, address); in setup_rt_frame() 204 if (pte_present(*ptep)) { in setup_rt_frame() 205 address = (unsigned long) page_address(pte_page(*ptep)); in setup_rt_frame() 213 pte_unmap(ptep); in setup_rt_frame()
|
/linux-4.1.27/arch/c6x/include/asm/ |
D | pgtable.h | 54 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument
|
/linux-4.1.27/include/linux/ |
D | rmap.h | 188 pte_t *ptep; in page_check_address() local 190 __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, in page_check_address() 192 return ptep; in page_check_address()
|
D | swapops.h | 138 extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, 153 static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, in __migration_entry_wait() argument
|
/linux-4.1.27/arch/score/include/asm/ |
D | pgtable.h | 103 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument 255 unsigned long address, pte_t *ptep) in update_mmu_cache() argument 257 pte_t pte = *ptep; in update_mmu_cache()
|
D | tlb.h | 10 #define __tlb_remove_tlb_entry(tlb, ptep, address) do {} while (0) argument
|
/linux-4.1.27/arch/blackfin/include/asm/ |
D | pgtable.h | 49 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument
|
D | tlb.h | 12 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/arch/arm/lib/ |
D | uaccess_with_memcpy.c | 26 pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) in pin_page_for_write() argument 67 *ptep = NULL; in pin_page_for_write() 82 *ptep = pte; in pin_page_for_write()
|
/linux-4.1.27/arch/metag/kernel/ |
D | dma.c | 276 pte_t *ptep; in dma_free_coherent() local 294 ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); in dma_free_coherent() 297 pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); in dma_free_coherent() 300 ptep++; in dma_free_coherent()
|
/linux-4.1.27/arch/openrisc/include/asm/ |
D | pgtable.h | 53 #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) argument 366 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) in pmd_set() argument 368 pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep; in pmd_set()
|
D | tlb.h | 28 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) argument
|
/linux-4.1.27/drivers/xen/ |
D | xlate_mmu.c | 76 static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, in remap_pte_fn() argument 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn()
|
/linux-4.1.27/arch/powerpc/perf/ |
D | callchain.c | 118 pte_t *ptep, pte; in read_user_stack_slow() local 130 ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift); in read_user_stack_slow() 131 if (!ptep) in read_user_stack_slow() 139 pte = READ_ONCE(*ptep); in read_user_stack_slow()
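read_user_stack_slow() above reads the PTE with READ_ONCE() because it walks without the PTE lock: the entry is copied once into a local and every check is made against that copy, never against the live table. A tiny sketch of the idiom; pte_present_snapshot() is a hypothetical name:

#include <linux/compiler.h>
#include <linux/mm.h>

static int pte_present_snapshot(pte_t *ptep)
{
        pte_t pte = READ_ONCE(*ptep);   /* one local copy; test only the copy */

        return pte_present(pte);
}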
|
/linux-4.1.27/arch/xtensa/mm/ |
D | cache.c | 214 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) in update_mmu_cache() argument 216 unsigned long pfn = pte_pfn(*ptep); in update_mmu_cache()
|
/linux-4.1.27/arch/m32r/mm/ |
D | fault-nommu.c | 97 pte_t *ptep) in update_mmu_cache() argument
|
D | fault.c | 349 pte_t *ptep) in update_mmu_cache() argument 365 pte_data = pte_val(*ptep); in update_mmu_cache()
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_hv_rm_mmu.c | 150 pte_t *ptep; in kvmppc_do_h_enter() local 194 ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift); in kvmppc_do_h_enter() 197 ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift); in kvmppc_do_h_enter() 199 if (ptep) { in kvmppc_do_h_enter() 216 pte = kvmppc_read_update_linux_pte(ptep, writing); in kvmppc_do_h_enter()
|
D | e500_mmu_host.c | 338 pte_t *ptep; in kvmppc_e500_shadow_map() local 479 ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL); in kvmppc_e500_shadow_map() 480 if (ptep) { in kvmppc_e500_shadow_map() 481 pte_t pte = READ_ONCE(*ptep); in kvmppc_e500_shadow_map()
|
D | book3s_64_mmu_hv.c | 538 pte_t *ptep, pte; in kvmppc_book3s_hv_page_fault() local 545 ptep = find_linux_pte_or_hugepte(current->mm->pgd, in kvmppc_book3s_hv_page_fault() 547 if (ptep) { in kvmppc_book3s_hv_page_fault() 548 pte = kvmppc_read_update_linux_pte(ptep, 1); in kvmppc_book3s_hv_page_fault()
|
/linux-4.1.27/arch/avr32/mm/ |
D | tlb.c | 104 unsigned long address, pte_t *ptep) in update_mmu_cache() argument 113 update_dtlb(address, *ptep); in update_mmu_cache()
|
/linux-4.1.27/kernel/events/ |
D | uprobes.c | 165 pte_t *ptep; in __replace_page() local 181 ptep = page_check_address(page, mm, addr, &ptl, 0); in __replace_page() 182 if (!ptep) in __replace_page() 195 flush_cache_page(vma, addr, pte_pfn(*ptep)); in __replace_page() 196 ptep_clear_flush_notify(vma, addr, ptep); in __replace_page() 197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in __replace_page() 202 pte_unmap_unlock(ptep, ptl); in __replace_page()
|
/linux-4.1.27/arch/unicore32/mm/ |
D | mmu.c | 489 pte_t *ptep) in update_mmu_cache() argument 491 unsigned long pfn = pte_pfn(*ptep); in update_mmu_cache()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | signal32.c | 356 pte_t *ptep, pte; in flush_signal_insns() local 384 ptep = pte_offset_map(pmdp, address); in flush_signal_insns() 385 pte = *ptep; in flush_signal_insns() 398 pte_unmap(ptep); in flush_signal_insns()
|
/linux-4.1.27/arch/x86/include/asm/xen/ |
D | page.h | 255 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
|
/linux-4.1.27/arch/metag/mm/ |
D | hugetlbpage.c | 92 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) in huge_pmd_unshare() argument
|