Lines Matching refs:pmd
Each hit below lists the source line number, the matching code fragment, the enclosing function, and whether pmd is a function argument or a local variable at that point.

391 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
394 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
395 pmd_clear(pmd); in free_pte_range()
404 pmd_t *pmd; in free_pmd_range() local
409 pmd = pmd_offset(pud, addr); in free_pmd_range()
412 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
414 free_pte_range(tlb, pmd, addr); in free_pmd_range()
415 } while (pmd++, addr = next, addr != end); in free_pmd_range()
428 pmd = pmd_offset(pud, start); in free_pmd_range()
430 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
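
The free_pmd_range() hits (404-430) show the canonical pmd-level walk used throughout this file: take pmd_offset() once, step with pmd_addr_end(), skip empty or corrupt entries via pmd_none_or_clear_bad(), and finally hand the emptied pmd page back through pmd_free_tlb(). These hits look like the classic mm/memory.c page-table code from a pre-4.5 kernel (pmd_trans_splitting() and wait_split_huge_page() still appear below), so the sketches in this listing assume that era's four-level pgd/pud/pmd/pte layout and are written as if they sat in the same file, since most of the helpers they call are static there. A minimal reconstruction of the loop shape, with the floor/ceiling bookkeeping and counter updates of the real function omitted:

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * Illustrative sketch of the walk in free_pmd_range(): visit every pmd
 * entry in [addr, end), free the pte pages it points to, then release
 * the pmd page itself via the mmu_gather.  The real function also
 * respects 'floor'/'ceiling' limits before freeing the pmd page.
 */
static void pmd_walk_sketch(struct mmu_gather *tlb, pud_t *pud,
                            unsigned long addr, unsigned long end)
{
        unsigned long start = addr;
        unsigned long next;
        pmd_t *pmd;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);   /* clamp to a pmd boundary */
                if (pmd_none_or_clear_bad(pmd))   /* empty or corrupt: skip */
                        continue;
                free_pte_range(tlb, pmd, addr);   /* clears the pmd, frees the pte page */
        } while (pmd++, addr = next, addr != end);

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);            /* queue the pmd page for freeing */
}
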
564 pmd_t *pmd, unsigned long address) in __pte_alloc() argument
587 ptl = pmd_lock(mm, pmd); in __pte_alloc()
589 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc()
591 pmd_populate(mm, pmd, new); in __pte_alloc()
593 } else if (unlikely(pmd_trans_splitting(*pmd))) in __pte_alloc()
599 wait_split_huge_page(vma->anon_vma, pmd); in __pte_alloc()
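
The __pte_alloc() hits (564-599) show the standard "allocate outside the lock, publish under pmd_lock()" pattern: the new pte page is only wired in with pmd_populate() if pmd_none() still holds, because another thread may have populated the pmd in the meantime; on this kernel an extra pmd_trans_splitting() check waits out an in-progress THP split. A hedged sketch of just the publish step (the split wait and the nr_ptes accounting of the real function are left out):

#include <linux/mm.h>
#include <asm/pgalloc.h>

/*
 * Sketch of the publish-under-lock step in __pte_alloc(): allocate the
 * pte page with no locks held, then attach it to the pmd only if nobody
 * beat us to it.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pte_publish_sketch(struct mm_struct *mm, pmd_t *pmd,
                              unsigned long address)
{
        spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm, address);

        if (!new)
                return -ENOMEM;

        smp_wmb();                      /* order page-table init before pmd_populate() */

        ptl = pmd_lock(mm, pmd);
        if (likely(pmd_none(*pmd))) {   /* has another thread populated it? */
                pmd_populate(mm, pmd, new);
                new = NULL;
        }
        spin_unlock(ptl);
        if (new)                        /* lost the race: drop our page */
                pte_free(mm, new);
        return 0;
}
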
603 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) in __pte_alloc_kernel() argument
612 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ in __pte_alloc_kernel()
613 pmd_populate_kernel(&init_mm, pmd, new); in __pte_alloc_kernel()
616 VM_BUG_ON(pmd_trans_splitting(*pmd)); in __pte_alloc_kernel()
651 pmd_t *pmd = pmd_offset(pud, addr); in print_bad_pte() local
684 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
1073 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1087 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
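
zap_pte_range() (1073-1087) opens the pte-scan pattern that recurs in almost every hit below: pte_offset_map_lock() maps the pte page and takes its page-table lock in one step, the entries are walked a page at a time, and pte_unmap_unlock() undoes both. What is done per entry is caller-specific; the sketch below (a made-up counter, not kernel code) only tallies present entries to show the loop shape, and assumes addr/end are page-aligned and stay within one pmd:

#include <linux/mm.h>

/*
 * Sketch of the pte-scan pattern started at line 1087: map and lock the
 * pte page, walk it entry by entry, then unmap and unlock.  Here the
 * body merely counts present ptes as a stand-in for real per-pte work.
 */
static unsigned long count_present_ptes_sketch(struct mm_struct *mm,
                                               pmd_t *pmd, unsigned long addr,
                                               unsigned long end)
{
        unsigned long present = 0;
        pte_t *start_pte, *pte;
        spinlock_t *ptl;

        start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        pte = start_pte;
        do {
                if (pte_present(*pte))
                        present++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(start_pte, ptl);

        return present;
}
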
1189 pmd_t *pmd; in zap_pmd_range() local
1192 pmd = pmd_offset(pud, addr); in zap_pmd_range()
1195 if (pmd_trans_huge(*pmd)) { in zap_pmd_range()
1206 split_huge_page_pmd(vma, addr, pmd); in zap_pmd_range()
1207 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1218 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in zap_pmd_range()
1220 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1223 } while (pmd++, addr = next, addr != end); in zap_pmd_range()
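
zap_pmd_range() (1189-1223) layers the transparent-huge-page decision on top of the basic walk: a huge pmd that is only partially covered by the range must first be split with split_huge_page_pmd() (the pre-4.5 name), one that is fully covered is removed in one go by zap_huge_pmd(), and only a stable, non-huge pmd is descended into via zap_pte_range(), guarded by pmd_none_or_trans_huge_or_clear_bad() because a concurrent MADV_DONTNEED or THP fault can change the entry under us. A condensed sketch of that per-entry decision (the surrounding pmd_offset()/pmd_addr_end() loop looks like the free_pmd_range() sketch earlier):

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Condensed sketch of the per-pmd decision inside zap_pmd_range()'s
 * loop.  'next' is the end of this pmd's slice of the unmap range; the
 * real loop also uses zap_pte_range()'s return value to resume after a
 * batching break.
 */
static void zap_one_pmd_sketch(struct mmu_gather *tlb,
                               struct vm_area_struct *vma, pmd_t *pmd,
                               unsigned long addr, unsigned long next,
                               struct zap_details *details)
{
        if (pmd_trans_huge(*pmd)) {
                if (next - addr != HPAGE_PMD_SIZE)
                        /* Only part of the huge page is covered: split it. */
                        split_huge_page_pmd(vma, addr, pmd);
                else if (zap_huge_pmd(tlb, vma, pmd, addr))
                        return;         /* whole huge pmd zapped in one go */
                /* otherwise fall through to the pte level */
        }
        /* The entry can still change under us; re-validate before descending. */
        if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                return;
        zap_pte_range(tlb, vma, pmd, addr, next, details);
}
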
1424 pmd_t * pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte() local
1425 if (pmd) { in __get_locked_pte()
1426 VM_BUG_ON(pmd_trans_huge(*pmd)); in __get_locked_pte()
1427 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
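
__get_locked_pte() (1424-1427) is the allocating descent in miniature: walk pgd to pud to pmd, allocating any missing level, then finish with pte_alloc_map_lock(), which hands back the pte mapped and locked (with *ptl set) or NULL on allocation failure. A sketch of that descent on this kernel's four-level layout (no p4d):

#include <linux/mm.h>
#include <asm/pgalloc.h>

/*
 * Sketch of the allocating page-table descent used by __get_locked_pte().
 * Returns the pte mapped and locked, or NULL if a level could not be
 * allocated.  The caller must pte_unmap_unlock() the result.
 */
static pte_t *locked_pte_sketch(struct mm_struct *mm, unsigned long addr,
                                spinlock_t **ptl)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud = pud_alloc(mm, pgd, addr);
        pmd_t *pmd;

        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;
        VM_BUG_ON(pmd_trans_huge(*pmd));        /* callers never use this on THP ranges */
        return pte_alloc_map_lock(mm, pmd, addr, ptl);
}
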
1621 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
1628 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1646 pmd_t *pmd; in remap_pmd_range() local
1650 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1651 if (!pmd) in remap_pmd_range()
1653 VM_BUG_ON(pmd_trans_huge(*pmd)); in remap_pmd_range()
1656 if (remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1659 } while (pmd++, addr = next, addr != end); in remap_pmd_range()
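
remap_pte_range() and remap_pmd_range() (1621-1659) are the internals behind the exported remap_pfn_range(); drivers never walk the pmd level themselves. As a usage illustration only, here is how a character driver's mmap handler typically calls the public entry point. The device name, the mydev_buf_phys variable and the size handling are made up for the example and are not part of the listed code:

#include <linux/mm.h>
#include <linux/fs.h>

/* Hypothetical physical base of a contiguous buffer, set at probe time. */
static phys_addr_t mydev_buf_phys;

/*
 * Hypothetical mmap handler: map the buffer into user space.
 * remap_pfn_range() does the pud/pmd/pte walking for us through the
 * remap_pmd_range()/remap_pte_range() helpers listed above.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = mydev_buf_phys >> PAGE_SHIFT;

        /* A real driver must also check 'size' against the buffer length. */
        return remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot);
}
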
1798 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
1808 pte_alloc_kernel(pmd, addr) : in apply_to_pte_range()
1809 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
1813 BUG_ON(pmd_huge(*pmd)); in apply_to_pte_range()
1817 token = pmd_pgtable(*pmd); in apply_to_pte_range()
1836 pmd_t *pmd; in apply_to_pmd_range() local
1842 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
1843 if (!pmd) in apply_to_pmd_range()
1847 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
1850 } while (pmd++, addr = next, addr != end); in apply_to_pmd_range()
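
apply_to_pte_range() and apply_to_pmd_range() (1798-1850) back the exported apply_to_page_range(), which runs a callback on every pte in a range, allocating missing levels along the way. On this kernel the callback type pte_fn_t still receives the pgtable_t "token" fetched with pmd_pgtable(*pmd) at line 1817 (later kernels dropped that argument). The callback and wrapper below are made-up illustrations of the calling convention, counting ptes over a kernel-virtual range in &init_mm:

#include <linux/mm.h>

/*
 * Hypothetical pte_fn_t callback: tally every pte visited.  Most real
 * callbacks ignore 'token'; returning non-zero aborts the walk.
 */
static int count_pte_cb(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data)
{
        unsigned long *count = data;

        (*count)++;
        return 0;
}

/* Usage sketch: apply the callback over a kernel virtual range. */
static long count_ptes_sketch(unsigned long addr, unsigned long size)
{
        unsigned long count = 0;
        int err;

        err = apply_to_page_range(&init_mm, addr, size, count_pte_cb, &count);
        return err ? err : (long)count;
}
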
1907 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, in pte_unmap_same() argument
1913 spinlock_t *ptl = pte_lockptr(mm, pmd); in pte_unmap_same()
2060 unsigned long address, pte_t *page_table, pmd_t *pmd, in wp_page_copy() argument
2094 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_page_copy()
2190 pmd_t *pmd) in wp_pfn_shared() argument
2205 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_pfn_shared()
2221 pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte, in wp_page_shared() argument
2250 page_table = pte_offset_map_lock(mm, pmd, address, in wp_page_shared()
2284 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_wp_page() argument
2302 orig_pte, pmd); in do_wp_page()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2318 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2445 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_swap_page() argument
2457 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) in do_swap_page()
2463 migration_entry_wait(mm, pmd, address); in do_swap_page()
2482 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2537 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2662 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_anonymous_page() argument
2684 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2710 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2922 unsigned long address, pmd_t *pmd, in do_read_fault() argument
2936 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2947 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2962 unsigned long address, pmd_t *pmd, in do_cow_fault() argument
2991 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
3028 unsigned long address, pmd_t *pmd, in do_shared_fault() argument
3056 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3097 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_fault() argument
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
3132 unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) in do_numa_page() argument
3155 ptl = pte_lockptr(mm, pmd); in do_numa_page()
3235 pte_t *pte, pmd_t *pmd, unsigned int flags) in handle_pte_fault() argument
3253 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, in handle_pte_fault()
3260 pte, pmd, flags, entry); in handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3266 ptl = pte_lockptr(mm, pmd); in handle_pte_fault()
3273 pte, pmd, ptl, entry); in handle_pte_fault()
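
handle_pte_fault() (3235-3273) is the per-pte dispatcher that ties the earlier hits together: a none pte goes to do_fault() for file-backed VMAs or to do_anonymous_page() otherwise, a non-present but non-empty pte is a swap or migration entry handled by do_swap_page(), a NUMA-hinting pte goes to do_numa_page(), and a write to a read-only pte reaches do_wp_page() under the lock from pte_lockptr(). A condensed, hedged sketch of that decision tree, written as if it sat in the same file (the pte_same() revalidation and the accessed/dirty updates of the real function are omitted; the exact file-backed test varies slightly between kernel versions):

#include <linux/mm.h>

/*
 * Condensed sketch of the dispatch in handle_pte_fault().  do_wp_page()
 * is entered with 'ptl' held and drops it itself, matching the real
 * calling convention.
 */
static int pte_fault_dispatch_sketch(struct mm_struct *mm,
                                     struct vm_area_struct *vma,
                                     unsigned long address, pte_t *pte,
                                     pmd_t *pmd, unsigned int flags)
{
        pte_t entry = *pte;
        spinlock_t *ptl;

        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops)        /* file-backed mapping */
                                return do_fault(mm, vma, address, pte, pmd,
                                                flags, entry);
                        return do_anonymous_page(mm, vma, address, pte, pmd,
                                                 flags);
                }
                /* swap or migration entry */
                return do_swap_page(mm, vma, address, pte, pmd, flags, entry);
        }

        if (pte_protnone(entry))        /* NUMA hinting fault */
                return do_numa_page(mm, vma, address, entry, pte, pmd);

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry))
                return do_wp_page(mm, vma, address, pte, pmd, ptl, entry);
        spin_unlock(ptl);
        return 0;
}
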
3305 pmd_t *pmd; in __handle_mm_fault() local
3315 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3316 if (!pmd) in __handle_mm_fault()
3318 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3322 pmd, flags); in __handle_mm_fault()
3326 pmd_t orig_pmd = *pmd; in __handle_mm_fault()
3343 orig_pmd, pmd); in __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3363 if (unlikely(pmd_none(*pmd)) && in __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3377 if (unlikely(pmd_trans_unstable(pmd))) in __handle_mm_fault()
3385 pte = pte_offset_map(pmd, address); in __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
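
The tail of __handle_mm_fault() (3363-3387) shows how the fault path finally drops to the pte level: make sure a pte page is attached to the pmd via __pte_alloc(), then re-check pmd_trans_unstable() because a transparent huge pmd could have materialized from under us (khugepaged or a concurrent huge fault), and only then is pte_offset_map() safe before handing off to handle_pte_fault(). A sketch of just that tail:

#include <linux/mm.h>

/*
 * Sketch of the pte-level entry at the end of __handle_mm_fault().
 * Mirrors lines 3363-3387: allocate a pte page if needed, bail out and
 * let the fault retry if the pmd became a huge pmd meanwhile, otherwise
 * map the pte and dispatch.
 */
static int pte_level_entry_sketch(struct mm_struct *mm,
                                  struct vm_area_struct *vma,
                                  pmd_t *pmd, unsigned long address,
                                  unsigned int flags)
{
        pte_t *pte;

        if (unlikely(pmd_none(*pmd)) &&
            unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;

        /* A huge pmd materialized from under us: just retry the fault later. */
        if (unlikely(pmd_trans_unstable(pmd)))
                return 0;

        pte = pte_offset_map(pmd, address);
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
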
3494 pmd_t *pmd; in __follow_pte() local
3505 pmd = pmd_offset(pud, address); in __follow_pte()
3506 VM_BUG_ON(pmd_trans_huge(*pmd)); in __follow_pte()
3507 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) in __follow_pte()
3511 if (pmd_huge(*pmd)) in __follow_pte()
3514 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
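
__follow_pte() (3494-3514) is the non-allocating counterpart of the descent shown earlier: every level is only looked up, a missing or bad entry fails the lookup, a huge pmd is refused because it has no pte level behind it, and the result comes back mapped and locked through pte_offset_map_lock(). A sketch of that lookup on the same four-level layout, returning -EINVAL on any failure as the real function does:

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Sketch of the read-only lookup in __follow_pte(): walk the existing
 * page tables and return the pte mapped and locked, or -EINVAL if any
 * level is missing, bad, or covered by a huge page.
 */
static int follow_pte_sketch(struct mm_struct *mm, unsigned long address,
                             pte_t **ptepp, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;

        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return -EINVAL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                return -EINVAL;

        pmd = pmd_offset(pud, address);
        VM_BUG_ON(pmd_trans_huge(*pmd));
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                return -EINVAL;

        if (pmd_huge(*pmd))             /* no pte level behind a huge pmd */
                return -EINVAL;

        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
        if (!pte_present(*ptep)) {
                pte_unmap_unlock(ptep, *ptlp);
                return -EINVAL;
        }
        *ptepp = ptep;
        return 0;
}
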