Lines matching refs:address (mm/memory.c)

565 	pmd_t *pmd, unsigned long address) in __pte_alloc() argument
568 pgtable_t new = pte_alloc_one(mm, address); in __pte_alloc()
604 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) in __pte_alloc_kernel() argument
606 pte_t *new = pte_alloc_one_kernel(&init_mm, address); in __pte_alloc_kernel()
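__pte_alloc() and __pte_alloc_kernel() are the slow paths that allocate a missing pte page and install it in the pmd; callers normally reach them through wrappers that test pmd_none() first. A minimal sketch of that caller-side pattern, assuming the 4.x signatures shown above (the helper name map_pte_allocating is hypothetical):

#include <linux/mm.h>

/* Minimal sketch: only take the __pte_alloc() slow path when the pmd
 * slot is still empty, then map the pte covering @address. */
static pte_t *map_pte_allocating(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 pmd_t *pmd, unsigned long address)
{
	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
		return NULL;			/* pte page allocation failed */
	return pte_offset_map(pmd, address);
}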
1420 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1425 unsigned long end = address + size; in zap_page_range_single()
1428 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1430 mmu_notifier_invalidate_range_start(mm, address, end); in zap_page_range_single()
1431 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1432 mmu_notifier_invalidate_range_end(mm, address, end); in zap_page_range_single()
1433 tlb_finish_mmu(&tlb, address, end); in zap_page_range_single()
1448 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1451 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1454 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
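zap_page_range_single() brackets the unmap with mmu_notifier calls and a per-range tlb_gather, and zap_vma_ptes() is the exported wrapper drivers use to revoke user mappings they created; line 1451 shows it rejecting ranges outside the vma. A minimal driver-side sketch, assuming a vma the driver itself mapped (the mydrv_* name is hypothetical):

#include <linux/mm.h>
#include <linux/printk.h>

/* Minimal sketch: tear down every user pte covering the vma, e.g.
 * before the backing device memory is released. */
static void mydrv_revoke_mapping(struct vm_area_struct *vma)
{
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		pr_warn("mydrv: range outside vma, nothing zapped\n");
}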
1999 unsigned long address) in do_page_mkwrite() argument
2004 vmf.virtual_address = (void __user *)(address & PAGE_MASK); in do_page_mkwrite()
2034 struct vm_area_struct *vma, unsigned long address, in wp_page_reuse() argument
2049 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_reuse()
2052 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) in wp_page_reuse()
2053 update_mmu_cache(vma, address, page_table); in wp_page_reuse()
2101 unsigned long address, pte_t *page_table, pmd_t *pmd, in wp_page_copy() argument
2108 const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */ in wp_page_copy()
2116 new_page = alloc_zeroed_user_highpage_movable(vma, address); in wp_page_copy()
2120 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in wp_page_copy()
2123 cow_user_page(new_page, old_page, address, vma); in wp_page_copy()
2136 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_page_copy()
2146 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_copy()
2155 ptep_clear_flush_notify(vma, address, page_table); in wp_page_copy()
2156 page_add_new_anon_rmap(new_page, vma, address); in wp_page_copy()
2164 set_pte_at_notify(mm, address, page_table, entry); in wp_page_copy()
2165 update_mmu_cache(vma, address, page_table); in wp_page_copy()
2230 struct vm_area_struct *vma, unsigned long address, in wp_pfn_shared() argument
2237 .pgoff = linear_page_index(vma, address), in wp_pfn_shared()
2238 .virtual_address = (void __user *)(address & PAGE_MASK), in wp_pfn_shared()
2247 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_pfn_shared()
2257 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2262 unsigned long address, pte_t *page_table, in wp_page_shared() argument
2280 tmp = do_page_mkwrite(vma, old_page, address); in wp_page_shared()
2292 page_table = pte_offset_map_lock(mm, pmd, address, in wp_page_shared()
2303 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2326 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_wp_page() argument
2332 old_page = vm_normal_page(vma, address, orig_pte); in do_wp_page()
2343 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2347 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2360 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2376 page_move_anon_rmap(old_page, vma, address); in do_wp_page()
2378 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2384 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2394 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
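do_wp_page() decides between reusing the page in place (wp_page_reuse()), notifying the filesystem first (wp_page_shared() via do_page_mkwrite()), and breaking copy-on-write (wp_page_copy()). The classic way to exercise the copy path from userspace is a write after fork(); a minimal runnable demo, assuming Linux and a C compiler:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "parent");	/* first-touch write: page allocated writable */

	pid_t pid = fork();	/* fork() write-protects the pte in both tasks */
	if (pid == 0) {
		p[0] = 'c';	/* write fault: do_wp_page() -> wp_page_copy()
				 * duplicates the page for the child */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent still sees \"%s\"\n", p);  /* child wrote its own copy */
	return 0;
}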
2487 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_swap_page() argument
2505 migration_entry_wait(mm, pmd, address); in do_swap_page()
2509 print_bad_pte(vma, address, orig_pte, NULL); in do_swap_page()
2518 GFP_HIGHUSER_MOVABLE, vma, address); in do_swap_page()
2524 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2564 page = ksm_might_need_to_copy(page, vma, address); in do_swap_page()
2579 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2610 set_pte_at(mm, address, page_table, pte); in do_swap_page()
2612 do_page_add_anon_rmap(page, vma, address, exclusive); in do_swap_page()
2615 page_add_new_anon_rmap(page, vma, address); in do_swap_page()
2638 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2645 update_mmu_cache(vma, address, page_table); in do_swap_page()
2669 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) in check_stack_guard_page() argument
2671 address &= PAGE_MASK; in check_stack_guard_page()
2672 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { in check_stack_guard_page()
2681 if (prev && prev->vm_end == address) in check_stack_guard_page()
2684 return expand_downwards(vma, address - PAGE_SIZE); in check_stack_guard_page()
2686 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { in check_stack_guard_page()
2690 if (next && next->vm_start == address + PAGE_SIZE) in check_stack_guard_page()
2693 return expand_upwards(vma, address + PAGE_SIZE); in check_stack_guard_page()
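check_stack_guard_page() treats the page at the boundary of a growable stack vma as a guard: a fault exactly on vma->vm_start (or vm_end for VM_GROWSUP) forces the vma to expand one page further via expand_downwards()/expand_upwards(), or fails if a foreign mapping abuts it. Stack growth is easy to provoke from userspace; a minimal demo, assuming a default grows-down stack well under its rlimit:

#include <stdio.h>
#include <string.h>

/* Each frame touches a fresh page below the current stack vma, so the
 * faults land at vma->vm_start and extend it via expand_downwards(). */
static void grow(int depth)
{
	volatile char pad[4096];

	memset((void *)pad, 0, sizeof(pad));
	if (depth > 0)
		grow(depth - 1);
}

int main(void)
{
	grow(256);	/* grow the stack by roughly 1 MiB */
	puts("stack grown");
	return 0;
}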
2704 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_anonymous_page() argument
2719 if (check_stack_guard_page(vma, address) < 0) in do_anonymous_page()
2724 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), in do_anonymous_page()
2726 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2732 return handle_userfault(vma, address, flags, in do_anonymous_page()
2741 page = alloc_zeroed_user_highpage_movable(vma, address); in do_anonymous_page()
2759 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2768 return handle_userfault(vma, address, flags, in do_anonymous_page()
2773 page_add_new_anon_rmap(page, vma, address); in do_anonymous_page()
2777 set_pte_at(mm, address, page_table, entry); in do_anonymous_page()
2780 update_mmu_cache(vma, address, page_table); in do_anonymous_page()
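do_anonymous_page() distinguishes read faults, which map the shared zero page via pte_mkspecial(pfn_pte(my_zero_pfn(...))) without allocating anything, from write faults, which allocate a real zeroed page (and may divert to handle_userfault() on registered ranges). Both paths are visible from userspace; a minimal demo, assuming a 4 KiB page size:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	unsigned char c = p[0];	/* read fault: zero page mapped read-only,
				 * no page allocated */
	p[0] = 42;		/* write on the zero-page pte: COW fault,
				 * wp_page_copy() allocates a zeroed page */
	printf("%u -> %u\n", c, p[0]);
	return munmap(p, 4096);
}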
2799 static int __do_fault(struct vm_area_struct *vma, unsigned long address, in __do_fault() argument
2806 vmf.virtual_address = (void __user *)(address & PAGE_MASK); in __do_fault()
2850 void do_set_pte(struct vm_area_struct *vma, unsigned long address, in do_set_pte() argument
2861 page_add_new_anon_rmap(page, vma, address); in do_set_pte()
2866 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2869 update_mmu_cache(vma, address, pte); in do_set_pte()
2936 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, in do_fault_around() argument
2947 start_addr = max(address & mask, vma->vm_start); in do_fault_around()
2948 off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
2980 unsigned long address, pmd_t *pmd, in do_read_fault() argument
2994 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2995 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
3001 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_read_fault()
3005 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
3012 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
3020 unsigned long address, pmd_t *pmd, in do_cow_fault() argument
3032 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_cow_fault()
3041 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); in do_cow_fault()
3046 copy_user_highpage(new_page, fault_page, address, vma); in do_cow_fault()
3049 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
3064 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
3086 unsigned long address, pmd_t *pmd, in do_shared_fault() argument
3096 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_shared_fault()
3106 tmp = do_page_mkwrite(vma, fault_page, address); in do_shared_fault()
3114 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3121 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
3155 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_fault() argument
3158 pgoff_t pgoff = (((address & PAGE_MASK) in do_fault()
3166 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3169 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3171 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
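do_fault() derives the file offset from the faulting address and dispatches on access type: reads go to do_read_fault() (which batches neighbouring ptes through do_fault_around()), first-touch private writes to do_cow_fault(), and shared writes to do_shared_fault(). All three are reachable from a plain file mapping; a minimal demo, assuming a writable file demo.dat can be created in the current directory:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.dat", O_RDWR | O_CREAT, 0600);
	if (fd < 0 || ftruncate(fd, 8192) < 0)
		return 1;

	char *priv = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, fd, 0);
	if (priv == MAP_FAILED)
		return 1;
	priv[4096] = 'x';	/* first touch is a private write:
				 * do_cow_fault(); the file is untouched */
	char c = priv[0];	/* first touch is a read: do_read_fault()
				 * plus do_fault_around() for neighbours */

	char *shrd = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (shrd == MAP_FAILED)
		return 1;
	shrd[0] = 'y';		/* shared write: do_shared_fault(), which may
				 * call do_page_mkwrite() before dirtying */
	printf("%d\n", c);
	return 0;
}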
3276 unsigned long address, pmd_t *pmd, unsigned int flags) in create_huge_pmd() argument
3279 return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); in create_huge_pmd()
3281 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); in create_huge_pmd()
3286 unsigned long address, pmd_t *pmd, pmd_t orig_pmd, in wp_huge_pmd() argument
3290 return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); in wp_huge_pmd()
3292 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); in wp_huge_pmd()
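create_huge_pmd() and wp_huge_pmd() route pmd-level faults either to the anonymous THP code or to a filesystem's pmd_fault handler. With transparent hugepages available, a pmd-aligned anonymous write fault can be provoked from userspace; a minimal demo, assuming x86-64's 2 MiB pmd size and THP set to madvise or always:

#include <stdio.h>
#include <sys/mman.h>

#define PMD_SZ (2UL << 20)

int main(void)
{
	/* Over-map so a pmd-aligned start exists inside the region. */
	char *raw = mmap(NULL, 2 * PMD_SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return 1;
	char *huge = (char *)(((unsigned long)raw + PMD_SZ - 1) & ~(PMD_SZ - 1));

	madvise(huge, PMD_SZ, MADV_HUGEPAGE);	/* opt this range into THP */
	huge[0] = 1;	/* aligned first-touch write: create_huge_pmd() ->
			 * do_huge_pmd_anonymous_page() can install a 2 MiB page */
	printf("touched %p\n", huge);
	return 0;
}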
3313 struct vm_area_struct *vma, unsigned long address, in handle_pte_fault() argument
3332 return do_anonymous_page(mm, vma, address, in handle_pte_fault()
3335 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3338 return do_swap_page(mm, vma, address, in handle_pte_fault()
3343 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3351 return do_wp_page(mm, vma, address, in handle_pte_fault()
3356 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3357 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3366 flush_tlb_fix_spurious_fault(vma, address); in handle_pte_fault()
3380 unsigned long address, unsigned int flags) in __handle_mm_fault() argument
3388 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3390 pgd = pgd_offset(mm, address); in __handle_mm_fault()
3391 pud = pud_alloc(mm, pgd, address); in __handle_mm_fault()
3394 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3398 int ret = create_huge_pmd(mm, vma, address, pmd, flags); in __handle_mm_fault()
3418 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3422 ret = wp_huge_pmd(mm, vma, address, pmd, in __handle_mm_fault()
3427 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3440 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3461 pte = pte_offset_map(pmd, address); in __handle_mm_fault()
3463 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3473 unsigned long address, unsigned int flags) in handle_mm_fault() argument
3492 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
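handle_mm_fault() is the entry point the architecture fault handlers call once they have located a vma; __handle_mm_fault() then walks pgd/pud/pmd, peeling off hugetlb and huge-pmd cases before falling through to handle_pte_fault(). A minimal sketch of the arch-side calling convention in this era (cf. arch/x86/mm/fault.c; the helper name fault_one_page is hypothetical):

#include <linux/mm.h>

/* Minimal sketch: fault in one user page, holding mmap_sem for read
 * as handle_mm_fault() requires. */
static int fault_one_page(struct mm_struct *mm, unsigned long address,
			  unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;	/* no vma covers the address */
	}
	fault = handle_mm_fault(mm, vma, address, flags);
	up_read(&mm->mmap_sem);
	return fault;
}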
3515 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __pud_alloc() argument
3517 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
3538 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
3540 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
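__pud_alloc() and __pmd_alloc() are the slow paths behind pud_alloc()/pmd_alloc(), mirroring __pte_alloc() one and two levels up; __handle_mm_fault() (lines 3390-3394 above) chains them into a top-down walk that allocates whatever is missing. A minimal sketch of that pattern on the pre-p4d four-level layout (the helper name walk_to_pmd is hypothetical):

#include <linux/mm.h>

/* Minimal sketch: descend from the pgd, allocating missing intermediate
 * tables, and return the pmd slot covering @address (NULL on OOM). */
static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud = pud_alloc(mm, pgd, address);

	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, address);
}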
3565 static int __follow_pte(struct mm_struct *mm, unsigned long address, in __follow_pte() argument
3573 pgd = pgd_offset(mm, address); in __follow_pte()
3577 pud = pud_offset(pgd, address); in __follow_pte()
3581 pmd = pmd_offset(pud, address); in __follow_pte()
3590 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
3603 static inline int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
3610 !(res = __follow_pte(mm, address, ptepp, ptlp))); in follow_pte()
3624 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
3634 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
3645 unsigned long address, unsigned int flags, in follow_phys() argument
3655 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
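__follow_pte() repeats the pgd/pud/pmd walk without allocating and returns the mapped, locked pte; follow_pte() is the wrapper callers use, and follow_pfn()/follow_phys() build on it to hand drivers the raw frame behind a user address. follow_pfn() only accepts VM_IO/VM_PFNMAP vmas, where the pfn is not a refcounted page. A minimal driver-side sketch, assuming such a vma (the mydrv_* name is hypothetical):

#include <linux/mm.h>

/* Minimal sketch: resolve the physical address behind a user virtual
 * address in a VM_IO/VM_PFNMAP vma; follow_pfn() rejects other vmas. */
static int mydrv_user_addr_to_phys(struct vm_area_struct *vma,
				   unsigned long address, phys_addr_t *phys)
{
	unsigned long pfn;
	int ret = follow_pfn(vma, address, &pfn);

	if (ret)
		return ret;	/* wrong vma type, or no pte present */
	*phys = ((phys_addr_t)pfn << PAGE_SHIFT) | (address & ~PAGE_MASK);
	return 0;
}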