Searched refs:vma (Results 1 - 200 of 898) sorted by relevance


/linux-4.4.14/arch/sparc/include/asm/
tlb_32.h
4 #define tlb_start_vma(tlb, vma) \
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
9 #define tlb_end_vma(tlb, vma) \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
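These tlb_start_vma()/tlb_end_vma() hooks bracket the per-VMA stage of an unmap in the generic mmu_gather code. A minimal sketch of that bracketing, assuming the sparc32 definitions above; the do_unmap() helper is purely hypothetical:

#include <asm/tlb.h>

static void example_unmap_one_vma(struct mmu_gather *tlb,
				  struct vm_area_struct *vma)
{
	tlb_start_vma(tlb, vma);	/* sparc32: flush_cache_range() over the vma */
	do_unmap(tlb, vma, vma->vm_start, vma->vm_end);	/* hypothetical unmap step */
	tlb_end_vma(tlb, vma);		/* sparc32: flush_tlb_range() over the vma */
}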
tlbflush_32.h
10 #define flush_tlb_range(vma, start, end) \
11 sparc32_cachetlb_ops->tlb_range(vma, start, end)
12 #define flush_tlb_page(vma, addr) \
13 sparc32_cachetlb_ops->tlb_page(vma, addr)
cacheflush_32.h
12 #define flush_cache_range(vma,start,end) \
13 sparc32_cachetlb_ops->cache_range(vma, start, end)
14 #define flush_cache_page(vma,addr,pfn) \
15 sparc32_cachetlb_ops->cache_page(vma, addr)
17 #define flush_icache_page(vma, pg) do { } while (0)
19 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 flush_cache_page(vma, vaddr, page_to_pfn(page));\
26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
cacheflush_64.h
23 #define flush_cache_range(vma, start, end) \
24 flush_cache_mm((vma)->vm_mm)
25 #define flush_cache_page(vma, page, pfn) \
26 flush_cache_mm((vma)->vm_mm)
50 #define flush_icache_page(vma, pg) do { } while(0)
51 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
66 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
68 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
fb.h
9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
13 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
tlb_64.h
25 #define tlb_start_vma(tlb, vma) do { } while (0)
26 #define tlb_end_vma(tlb, vma) do { } while (0)
hugetlb.h
42 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
64 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
71 flush_tlb_page(vma, addr); huge_ptep_set_access_flags()
/linux-4.4.14/arch/ia64/include/asm/
fb.h
9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
12 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) fb_pgprotect()
13 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
15 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
cacheflush.h
22 #define flush_cache_range(vma, start, end) do { } while (0)
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
24 #define flush_icache_page(vma,page) do { } while (0)
41 #define flush_icache_user_range(vma, page, user_addr, len) \
47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
49 flush_icache_user_range(vma, page, vaddr, len); \
51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
tlbflush.h
69 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
75 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
78 flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); flush_tlb_page()
80 if (vma->vm_mm == current->active_mm) flush_tlb_page()
83 vma->vm_mm->context = 0; flush_tlb_page()
/linux-4.4.14/arch/metag/include/asm/
tlb.h
11 #define tlb_start_vma(tlb, vma) \
14 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
17 #define tlb_end_vma(tlb, vma) \
20 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
26 #define tlb_start_vma(tlb, vma) do { } while (0)
27 #define tlb_end_vma(tlb, vma) do { } while (0)
tlbflush.h
15 * - flush_tlb_page(vma, vmaddr) flushes one page
52 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
55 flush_tlb_mm(vma->vm_mm); flush_tlb_page()
58 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
61 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
/linux-4.4.14/arch/x86/um/
mem_64.c
4 const char *arch_vma_name(struct vm_area_struct *vma) arch_vma_name() argument
6 if (vma->vm_mm && vma->vm_start == um_vdso_addr) arch_vma_name()
mem_32.c
47 struct vm_area_struct *vma = get_gate_vma(mm); in_gate_area() local
49 if (!vma) in_gate_area()
52 return (addr >= vma->vm_start) && (addr < vma->vm_end); in_gate_area()
/linux-4.4.14/arch/parisc/include/asm/
tlb.h
9 #define tlb_start_vma(tlb, vma) \
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
14 #define tlb_end_vma(tlb, vma) \
16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; fb_pgprotect()
cacheflush.h
82 #define flush_icache_page(vma,page) do { \
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
105 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
106 void flush_cache_range(struct vm_area_struct *vma,
114 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
117 flush_tlb_page(vma, vmaddr); flush_anon_page()
/linux-4.4.14/arch/powerpc/include/asm/
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, fb_pgprotect()
12 vma->vm_end - vma->vm_start, fb_pgprotect()
13 vma->vm_page_prot); fb_pgprotect()
tlbflush.h
8 * - flush_tlb_page(vma, vmaddr) flushes one page
11 * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
12 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
13 * - flush_tlb_range(vma, start, end) flushes a range of pages
37 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
42 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
49 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
54 #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
57 #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
65 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
66 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
67 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
70 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
73 flush_tlb_page(vma, vmaddr); local_flush_tlb_page()
142 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
147 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
152 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, flush_tlb_page_nohash() argument
157 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
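The comment block at the top of this tlbflush.h summarizes the flush API; the ordering it implies is to update the page-table entry first and then flush the stale translation. A small sketch of that pattern, modeled on the ptep_set_access_flags()/huge_ptep_set_access_flags() entries elsewhere in these results; example_set_pte() itself is hypothetical:

#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_set_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t newpte)
{
	set_pte_at(vma->vm_mm, addr, ptep, newpte);	/* install the new entry */
	flush_tlb_page(vma, addr);			/* invalidate any cached old translation */
}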
cacheflush.h
22 #define flush_cache_range(vma, start, end) do { } while (0)
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
24 #define flush_icache_page(vma, page) do { } while (0)
36 extern void flush_icache_user_range(struct vm_area_struct *vma,
60 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
63 flush_icache_user_range(vma, page, vaddr, len); \
65 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
hugetlb.h
86 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
88 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
131 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
135 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); huge_ptep_clear_flush()
136 flush_tlb_page(vma, addr); huge_ptep_clear_flush()
149 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
159 ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
162 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
176 static inline void flush_hugetlb_page(struct vm_area_struct *vma, flush_hugetlb_page() argument
tlb.h
28 #define tlb_start_vma(tlb, vma) do { } while (0)
29 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.4.14/include/linux/
hugetlb_inline.h
8 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) is_vm_hugetlb_page() argument
10 return !!(vma->vm_flags & VM_HUGETLB); is_vm_hugetlb_page()
15 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) is_vm_hugetlb_page() argument
userfaultfd_k.h
30 extern int handle_userfault(struct vm_area_struct *vma, unsigned long address,
40 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, is_mergeable_vm_userfaultfd_ctx() argument
43 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; is_mergeable_vm_userfaultfd_ctx()
46 static inline bool userfaultfd_missing(struct vm_area_struct *vma) userfaultfd_missing() argument
48 return vma->vm_flags & VM_UFFD_MISSING; userfaultfd_missing()
51 static inline bool userfaultfd_armed(struct vm_area_struct *vma) userfaultfd_armed() argument
53 return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); userfaultfd_armed()
59 static inline int handle_userfault(struct vm_area_struct *vma, handle_userfault() argument
67 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, is_mergeable_vm_userfaultfd_ctx() argument
73 static inline bool userfaultfd_missing(struct vm_area_struct *vma) userfaultfd_missing() argument
78 static inline bool userfaultfd_armed(struct vm_area_struct *vma) userfaultfd_armed() argument
khugepaged.h
9 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
39 static inline int khugepaged_enter(struct vm_area_struct *vma, khugepaged_enter() argument
42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) khugepaged_enter()
46 if (__khugepaged_enter(vma->vm_mm)) khugepaged_enter()
58 static inline int khugepaged_enter(struct vm_area_struct *vma, khugepaged_enter() argument
63 static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, khugepaged_enter_vma_merge() argument
huge_mm.h
5 struct vm_area_struct *vma,
10 struct vm_area_struct *vma);
12 struct vm_area_struct *vma,
15 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
23 struct vm_area_struct *vma,
25 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
28 extern int move_huge_pmd(struct vm_area_struct *vma,
33 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
70 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
103 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
125 extern int hugepage_madvise(struct vm_area_struct *vma,
127 extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
131 extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
134 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
137 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); pmd_trans_huge_lock()
139 return __pmd_trans_huge_lock(pmd, vma, ptl); pmd_trans_huge_lock()
150 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
192 static inline int hugepage_madvise(struct vm_area_struct *vma, hugepage_madvise() argument
198 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, vma_adjust_trans_huge() argument
204 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
210 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_numa_page() argument
dax.h
23 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, dax_pmd_fault() argument
32 #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
33 #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
35 static inline bool vma_is_dax(struct vm_area_struct *vma) vma_is_dax() argument
37 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); vma_is_dax()
mmdebug.h
14 void dump_vma(const struct vm_area_struct *vma);
26 #define VM_BUG_ON_VMA(cond, vma) \
29 dump_vma(vma); \
46 #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
rmap.h
20 * directly to a vma: instead it points to an anon_vma, on whose list
23 * After unlinking the last vma on the list, we must garbage collect
25 * pointing to this anon_vma once its vma list is empty.
32 * guarantee that the vma of page tables will exist for
74 struct vm_area_struct *vma; member in struct:anon_vma_chain
141 static inline void anon_vma_merge(struct vm_area_struct *vma, anon_vma_merge() argument
144 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); anon_vma_merge()
199 * Used by swapoff to help locate where page is expected in vma.
222 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
228 * rmap_one: executed on each vma where page is mapped
231 * invalid_vma: for skipping uninterested vma
235 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
239 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
247 #define anon_vma_prepare(vma) (0)
248 #define anon_vma_link(vma) do {} while (0)
mempolicy.h
31 * its own state. All vma manipulation is somewhat protected by a down_read on
93 #define vma_policy(vma) ((vma)->vm_policy)
131 struct vm_area_struct *vma,
138 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
140 bool vma_policy_mof(struct vm_area_struct *vma);
148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
174 /* Check if a vma is migratable */ vma_migratable()
175 static inline int vma_migratable(struct vm_area_struct *vma) vma_migratable() argument
177 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) vma_migratable()
181 if (vma->vm_flags & VM_HUGETLB) vma_migratable()
190 if (vma->vm_file && vma_migratable()
191 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) vma_migratable()
231 #define vma_policy(vma) NULL
257 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, huge_zonelist() argument
288 static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, mpol_misplaced() argument
ksm.h
20 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
52 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
58 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
59 * but what if the vma was unmerged while the page was swapped out?
62 struct vm_area_struct *vma, unsigned long address);
79 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, ksm_madvise() argument
86 struct vm_area_struct *vma, unsigned long address) ksm_might_need_to_copy()
85 ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) ksm_might_need_to_copy() argument
highmem.h
14 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
145 * @vma: The VMA the page is to be allocated for
158 struct vm_area_struct *vma,
162 vma, vaddr);
173 * @vma: The VMA the page is to be allocated for
180 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
183 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
226 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
225 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
/linux-4.4.14/arch/avr32/include/asm/
tlb.h
11 #define tlb_start_vma(tlb, vma) \
12 flush_cache_range(vma, vma->vm_start, vma->vm_end)
14 #define tlb_end_vma(tlb, vma) \
15 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot) fb_pgprotect()
tlbflush.h
19 * - flush_tlb_page(vma, vmaddr) flushes one page
20 * - flush_tlb_range(vma, start, end) flushes a range of pages
26 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
cacheflush.h
91 #define flush_cache_range(vma, start, end) do { } while (0)
92 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
101 * #define flush_icache_page(vma, page) do { } while (0)
103 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
121 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
125 static inline void copy_from_user_page(struct vm_area_struct *vma, copy_from_user_page() argument
/linux-4.4.14/fs/ocfs2/
mmap.h
4 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma);
/linux-4.4.14/arch/xtensa/include/asm/
tlb.h
21 # define tlb_start_vma(tlb,vma) do { } while (0)
22 # define tlb_end_vma(tlb,vma) do { } while (0)
26 # define tlb_start_vma(tlb, vma) \
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
32 # define tlb_end_vma(tlb, vma) \
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
/linux-4.4.14/drivers/gpu/drm/
drm_vm.c
49 struct vm_area_struct *vma; member in struct:drm_vma_entry
53 static void drm_vm_open(struct vm_area_struct *vma);
54 static void drm_vm_close(struct vm_area_struct *vma);
57 struct vm_area_struct *vma) drm_io_prot()
59 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); drm_io_prot()
67 if (efi_range_is_wc(vma->vm_start, vma->vm_end - drm_io_prot()
68 vma->vm_start)) drm_io_prot()
78 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) drm_dma_prot() argument
80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); drm_dma_prot()
91 * \param vma virtual memory area.
99 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_fault() argument
101 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_fault()
116 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) drm_do_vm_fault()
128 vma->vm_start; drm_do_vm_fault()
172 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_fault() argument
181 * \param vma virtual memory area.
188 static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_shm_fault() argument
190 struct drm_local_map *map = vma->vm_private_data; drm_do_vm_shm_fault()
198 offset = (unsigned long)vmf->virtual_address - vma->vm_start; drm_do_vm_shm_fault()
213 * \param vma virtual memory area.
218 static void drm_vm_shm_close(struct vm_area_struct *vma) drm_vm_shm_close() argument
220 struct drm_file *priv = vma->vm_file->private_data; drm_vm_shm_close()
228 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_shm_close()
230 map = vma->vm_private_data; drm_vm_shm_close()
234 if (pt->vma->vm_private_data == map) drm_vm_shm_close()
236 if (pt->vma == vma) { drm_vm_shm_close()
284 * \param vma virtual memory area.
290 static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_dma_fault() argument
292 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_dma_fault()
304 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ drm_do_vm_dma_fault()
318 * \param vma virtual memory area.
324 static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_sg_fault() argument
326 struct drm_local_map *map = vma->vm_private_data; drm_do_vm_sg_fault()
327 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_sg_fault()
340 offset = (unsigned long)vmf->virtual_address - vma->vm_start; drm_do_vm_sg_fault()
350 static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_fault() argument
352 return drm_do_vm_fault(vma, vmf); drm_vm_fault()
355 static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_shm_fault() argument
357 return drm_do_vm_shm_fault(vma, vmf); drm_vm_shm_fault()
360 static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_dma_fault() argument
362 return drm_do_vm_dma_fault(vma, vmf); drm_vm_dma_fault()
365 static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_sg_fault() argument
367 return drm_do_vm_sg_fault(vma, vmf); drm_vm_sg_fault()
401 * \param vma virtual memory area.
403 * Create a new drm_vma_entry structure as the \p vma private data entry and
407 struct vm_area_struct *vma) drm_vm_open_locked()
412 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_open_locked()
416 vma_entry->vma = vma; drm_vm_open_locked()
422 static void drm_vm_open(struct vm_area_struct *vma) drm_vm_open() argument
424 struct drm_file *priv = vma->vm_file->private_data; drm_vm_open()
428 drm_vm_open_locked(dev, vma); drm_vm_open()
433 struct vm_area_struct *vma) drm_vm_close_locked()
438 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_close_locked()
441 if (pt->vma == vma) { drm_vm_close_locked()
452 * \param vma virtual memory area.
454 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
457 static void drm_vm_close(struct vm_area_struct *vma) drm_vm_close() argument
459 struct drm_file *priv = vma->vm_file->private_data; drm_vm_close()
463 drm_vm_close_locked(dev, vma); drm_vm_close()
471 * \param vma virtual memory area.
477 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) drm_mmap_dma() argument
482 unsigned long length = vma->vm_end - vma->vm_start; drm_mmap_dma()
487 vma->vm_start, vma->vm_end, vma->vm_pgoff); drm_mmap_dma()
496 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); drm_mmap_dma()
498 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; drm_mmap_dma()
503 vma->vm_page_prot = drm_mmap_dma()
506 (__pte(pgprot_val(vma->vm_page_prot))))); drm_mmap_dma()
510 vma->vm_ops = &drm_vm_dma_ops; drm_mmap_dma()
512 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; drm_mmap_dma()
514 drm_vm_open_locked(dev, vma); drm_mmap_dma()
531 * \param vma virtual memory area.
540 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) drm_mmap_locked() argument
549 vma->vm_start, vma->vm_end, vma->vm_pgoff); drm_mmap_locked()
558 if (!vma->vm_pgoff drm_mmap_locked()
564 return drm_mmap_dma(filp, vma); drm_mmap_locked()
566 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { drm_mmap_locked()
576 if (map->size < vma->vm_end - vma->vm_start) drm_mmap_locked()
580 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); drm_mmap_locked()
582 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; drm_mmap_locked()
587 vma->vm_page_prot = drm_mmap_locked()
590 (__pte(pgprot_val(vma->vm_page_prot))))); drm_mmap_locked()
604 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; drm_mmap_locked()
606 vma->vm_ops = &drm_vm_ops; drm_mmap_locked()
614 vma->vm_page_prot = drm_io_prot(map, vma); drm_mmap_locked()
615 if (io_remap_pfn_range(vma, vma->vm_start, drm_mmap_locked()
617 vma->vm_end - vma->vm_start, drm_mmap_locked()
618 vma->vm_page_prot)) drm_mmap_locked()
623 vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); drm_mmap_locked()
625 vma->vm_ops = &drm_vm_ops; drm_mmap_locked()
630 if (remap_pfn_range(vma, vma->vm_start, drm_mmap_locked()
632 vma->vm_end - vma->vm_start, vma->vm_page_prot)) drm_mmap_locked()
634 vma->vm_page_prot = drm_dma_prot(map->type, vma); drm_mmap_locked()
637 vma->vm_ops = &drm_vm_shm_ops; drm_mmap_locked()
638 vma->vm_private_data = (void *)map; drm_mmap_locked()
641 vma->vm_ops = &drm_vm_sg_ops; drm_mmap_locked()
642 vma->vm_private_data = (void *)map; drm_mmap_locked()
643 vma->vm_page_prot = drm_dma_prot(map->type, vma); drm_mmap_locked()
648 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; drm_mmap_locked()
650 drm_vm_open_locked(dev, vma); drm_mmap_locked()
654 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) drm_legacy_mmap() argument
664 ret = drm_mmap_locked(filp, vma); drm_legacy_mmap()
673 struct drm_vma_entry *vma, *vma_temp; drm_legacy_vma_flush() local
675 /* Clear vma list (only needed for legacy drivers) */ drm_legacy_vma_flush()
676 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { drm_legacy_vma_flush()
677 list_del(&vma->head); drm_legacy_vma_flush()
678 kfree(vma); drm_legacy_vma_flush()
687 struct vm_area_struct *vma; drm_vma_info() local
697 seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n", drm_vma_info()
702 vma = pt->vma; drm_vma_info()
703 if (!vma) drm_vma_info()
708 (void *)vma->vm_start, (void *)vma->vm_end, drm_vma_info()
709 vma->vm_flags & VM_READ ? 'r' : '-', drm_vma_info()
710 vma->vm_flags & VM_WRITE ? 'w' : '-', drm_vma_info()
711 vma->vm_flags & VM_EXEC ? 'x' : '-', drm_vma_info()
712 vma->vm_flags & VM_MAYSHARE ? 's' : 'p', drm_vma_info()
713 vma->vm_flags & VM_LOCKED ? 'l' : '-', drm_vma_info()
714 vma->vm_flags & VM_IO ? 'i' : '-', drm_vma_info()
715 vma->vm_pgoff); drm_vma_info()
718 pgprot = pgprot_val(vma->vm_page_prot); drm_vma_info()
56 drm_io_prot(struct drm_local_map *map, struct vm_area_struct *vma) drm_io_prot() argument
406 drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma) drm_vm_open_locked() argument
432 drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma) drm_vm_close_locked() argument
/linux-4.4.14/arch/arc/include/asm/
tlb.h
27 #define tlb_start_vma(tlb, vma)
29 #define tlb_start_vma(tlb, vma) \
32 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
36 #define tlb_end_vma(tlb, vma) \
39 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
tlbflush.h
16 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
18 void local_flush_tlb_range(struct vm_area_struct *vma,
20 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
24 #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
25 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
29 #define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
31 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
33 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
37 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
cacheflush.h
30 #define flush_icache_page(vma, page)
60 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
66 void flush_cache_range(struct vm_area_struct *vma,
68 void flush_cache_page(struct vm_area_struct *vma,
76 void flush_anon_page(struct vm_area_struct *vma,
108 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
111 if (vma->vm_flags & VM_EXEC) \
115 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/mm/
mprotect.c
41 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, lock_pte_protection() argument
49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
51 pmdl = pmd_lock(vma->vm_mm, pmd); lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
62 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, change_pte_range() argument
66 struct mm_struct *mm = vma->vm_mm; change_pte_range()
71 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); change_pte_range()
89 page = vm_normal_page(vma, addr, oldpte); change_pte_range()
106 !(vma->vm_flags & VM_SOFTDIRTY))) { change_pte_range()
136 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, change_pmd_range() argument
141 struct mm_struct *mm = vma->vm_mm; change_pmd_range()
163 split_huge_page_pmd(vma, addr, pmd); change_pmd_range()
165 int nr_ptes = change_huge_pmd(vma, pmd, addr, change_pmd_range()
180 this_pages = change_pte_range(vma, pmd, addr, next, newprot, change_pmd_range()
193 static inline unsigned long change_pud_range(struct vm_area_struct *vma, change_pud_range() argument
206 pages += change_pmd_range(vma, pud, addr, next, newprot, change_pud_range()
213 static unsigned long change_protection_range(struct vm_area_struct *vma, change_protection_range() argument
217 struct mm_struct *mm = vma->vm_mm; change_protection_range()
225 flush_cache_range(vma, addr, end); change_protection_range()
231 pages += change_pud_range(vma, pgd, addr, next, newprot, change_protection_range()
237 flush_tlb_range(vma, start, end); change_protection_range()
243 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, change_protection() argument
249 if (is_vm_hugetlb_page(vma)) change_protection()
250 pages = hugetlb_change_protection(vma, start, end, newprot); change_protection()
252 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); change_protection()
258 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, mprotect_fixup() argument
261 struct mm_struct *mm = vma->vm_mm; mprotect_fixup()
262 unsigned long oldflags = vma->vm_flags; mprotect_fixup()
270 *pprev = vma; mprotect_fixup()
291 * First try to merge with previous and/or next vma. mprotect_fixup()
293 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); mprotect_fixup()
295 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), mprotect_fixup()
296 vma->vm_userfaultfd_ctx); mprotect_fixup()
298 vma = *pprev; mprotect_fixup()
302 *pprev = vma; mprotect_fixup()
304 if (start != vma->vm_start) { mprotect_fixup()
305 error = split_vma(mm, vma, start, 1); mprotect_fixup()
310 if (end != vma->vm_end) { mprotect_fixup()
311 error = split_vma(mm, vma, end, 0); mprotect_fixup()
321 vma->vm_flags = newflags; mprotect_fixup()
322 dirty_accountable = vma_wants_writenotify(vma); mprotect_fixup()
323 vma_set_page_prot(vma); mprotect_fixup()
325 change_protection(vma, start, end, vma->vm_page_prot, mprotect_fixup()
334 populate_vma_page_range(vma, start, end, NULL); mprotect_fixup()
337 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); mprotect_fixup()
338 vm_stat_account(mm, newflags, vma->vm_file, nrpages); mprotect_fixup()
339 perf_event_mmap(vma); mprotect_fixup()
351 struct vm_area_struct *vma, *prev; SYSCALL_DEFINE3() local
380 vma = find_vma(current->mm, start); SYSCALL_DEFINE3()
382 if (!vma) SYSCALL_DEFINE3()
384 prev = vma->vm_prev; SYSCALL_DEFINE3()
386 if (vma->vm_start >= end) SYSCALL_DEFINE3()
388 start = vma->vm_start; SYSCALL_DEFINE3()
390 if (!(vma->vm_flags & VM_GROWSDOWN)) SYSCALL_DEFINE3()
393 if (vma->vm_start > start) SYSCALL_DEFINE3()
396 end = vma->vm_end; SYSCALL_DEFINE3()
398 if (!(vma->vm_flags & VM_GROWSUP)) SYSCALL_DEFINE3()
402 if (start > vma->vm_start) SYSCALL_DEFINE3()
403 prev = vma; SYSCALL_DEFINE3()
408 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ SYSCALL_DEFINE3()
411 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); SYSCALL_DEFINE3()
419 error = security_file_mprotect(vma, reqprot, prot); SYSCALL_DEFINE3()
423 tmp = vma->vm_end; SYSCALL_DEFINE3()
426 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); SYSCALL_DEFINE3()
436 vma = prev->vm_next; SYSCALL_DEFINE3()
437 if (!vma || vma->vm_start != nstart) { SYSCALL_DEFINE3()
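The SYSCALL_DEFINE3(mprotect, ...) path above is what services the userspace mprotect(2) call: find the vma, split it if the range does not cover it exactly, then apply mprotect_fixup(). A self-contained userspace example of the interface it implements, given only for illustration:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");			/* writable for now */
	if (mprotect(p, page, PROT_READ) != 0) {	/* kernel splits/merges the vma as needed */
		perror("mprotect");
		return 1;
	}
	printf("%s (now read-only)\n", p);
	munmap(p, page);
	return 0;
}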
msync.c
35 struct vm_area_struct *vma; SYSCALL_DEFINE3() local
58 vma = find_vma(mm, start); SYSCALL_DEFINE3()
65 if (!vma) SYSCALL_DEFINE3()
67 /* Here start < vma->vm_end. */ SYSCALL_DEFINE3()
68 if (start < vma->vm_start) { SYSCALL_DEFINE3()
69 start = vma->vm_start; SYSCALL_DEFINE3()
74 /* Here vma->vm_start <= start < vma->vm_end. */ SYSCALL_DEFINE3()
76 (vma->vm_flags & VM_LOCKED)) { SYSCALL_DEFINE3()
80 file = vma->vm_file; SYSCALL_DEFINE3()
81 fstart = (start - vma->vm_start) + SYSCALL_DEFINE3()
82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT); SYSCALL_DEFINE3()
83 fend = fstart + (min(end, vma->vm_end) - start) - 1; SYSCALL_DEFINE3()
84 start = vma->vm_end; SYSCALL_DEFINE3()
86 (vma->vm_flags & VM_SHARED)) { SYSCALL_DEFINE3()
94 vma = find_vma(mm, start); SYSCALL_DEFINE3()
100 vma = vma->vm_next; SYSCALL_DEFINE3()
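mm/msync.c above walks the vma list for the requested range and writes back shared file-backed mappings. A short userspace illustration of the msync(2) call it implements; the temporary file name is arbitrary:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("msync-demo.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, page) != 0) {
		perror("open/ftruncate");
		return 1;
	}
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memcpy(p, "dirty data", 10);
	if (msync(p, page, MS_SYNC) != 0)	/* reaches the vma walk shown above */
		perror("msync");
	munmap(p, page);
	close(fd);
	return 0;
}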
mremap.c
53 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, alloc_new_pmd() argument
89 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, move_ptes() argument
96 struct mm_struct *mm = vma->vm_mm; move_ptes()
109 * - During exec() shift_arg_pages(), we use a specially tagged vma move_ptes()
112 * - During mremap(), new_vma is often known to be placed after vma move_ptes()
119 if (vma->vm_file) { move_ptes()
120 mapping = vma->vm_file->f_mapping; move_ptes()
123 if (vma->anon_vma) { move_ptes()
124 anon_vma = vma->anon_vma; move_ptes()
163 unsigned long move_page_tables(struct vm_area_struct *vma, move_page_tables() argument
175 flush_cache_range(vma, old_addr, old_end); move_page_tables()
179 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); move_page_tables()
188 old_pmd = get_old_pmd(vma->vm_mm, old_addr); move_page_tables()
191 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); move_page_tables()
197 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, move_page_tables()
198 vma); move_page_tables()
201 anon_vma_lock_write(vma->anon_vma); move_page_tables()
202 err = move_huge_pmd(vma, new_vma, old_addr, move_page_tables()
206 anon_vma_unlock_write(vma->anon_vma); move_page_tables()
212 split_huge_page_pmd(vma, old_addr, old_pmd); move_page_tables()
224 move_ptes(vma, old_pmd, old_addr, old_addr + extent, move_page_tables()
229 flush_tlb_range(vma, old_end-len, old_addr); move_page_tables()
231 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); move_page_tables()
236 static unsigned long move_vma(struct vm_area_struct *vma, move_vma() argument
240 struct mm_struct *mm = vma->vm_mm; move_vma()
242 unsigned long vm_flags = vma->vm_flags; move_vma()
253 * which may split one vma into three before unmapping. move_vma()
262 * pages recently unmapped. But leave vma->vm_flags as it was, move_vma()
263 * so KSM can come around to merge on vma and new_vma afterwards. move_vma()
265 err = ksm_madvise(vma, old_addr, old_addr + old_len, move_vma()
270 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); move_vma()
271 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, move_vma()
276 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, move_vma()
280 } else if (vma->vm_ops && vma->vm_ops->mremap) { move_vma()
281 err = vma->vm_ops->mremap(new_vma); move_vma()
290 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, move_vma()
292 vma = new_vma; move_vma()
303 vma->vm_flags &= ~VM_ACCOUNT; move_vma()
304 excess = vma->vm_end - vma->vm_start - old_len; move_vma()
305 if (old_addr > vma->vm_start && move_vma()
306 old_addr + old_len < vma->vm_end) move_vma()
320 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); move_vma()
323 /* OOM: unable to split vma, just get accounts right */ move_vma()
329 /* Restore VM_ACCOUNT if one or two pieces of vma left */ move_vma()
331 vma->vm_flags |= VM_ACCOUNT; move_vma()
333 vma->vm_next->vm_flags |= VM_ACCOUNT; move_vma()
348 struct vm_area_struct *vma = find_vma(mm, addr); vma_to_resize() local
351 if (!vma || vma->vm_start > addr) vma_to_resize()
354 if (is_vm_hugetlb_page(vma)) vma_to_resize()
358 if (old_len > vma->vm_end - addr) vma_to_resize()
362 return vma; vma_to_resize()
365 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; vma_to_resize()
366 pgoff += vma->vm_pgoff; vma_to_resize()
370 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) vma_to_resize()
373 if (vma->vm_flags & VM_LOCKED) { vma_to_resize()
385 if (vma->vm_flags & VM_ACCOUNT) { vma_to_resize()
392 return vma; vma_to_resize()
399 struct vm_area_struct *vma; mremap_to() local
425 vma = vma_to_resize(addr, old_len, new_len, &charged); mremap_to()
426 if (IS_ERR(vma)) { mremap_to()
427 ret = PTR_ERR(vma); mremap_to()
432 if (vma->vm_flags & VM_MAYSHARE) mremap_to()
435 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + mremap_to()
436 ((addr - vma->vm_start) >> PAGE_SHIFT), mremap_to()
441 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); mremap_to()
451 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) vma_expandable() argument
453 unsigned long end = vma->vm_end + delta; vma_expandable()
454 if (end < vma->vm_end) /* overflow */ vma_expandable()
456 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ vma_expandable()
458 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, vma_expandable()
476 struct vm_area_struct *vma; SYSCALL_DEFINE5() local
525 vma = vma_to_resize(addr, old_len, new_len, &charged); SYSCALL_DEFINE5()
526 if (IS_ERR(vma)) { SYSCALL_DEFINE5()
527 ret = PTR_ERR(vma); SYSCALL_DEFINE5()
533 if (old_len == vma->vm_end - addr) { SYSCALL_DEFINE5()
535 if (vma_expandable(vma, new_len - old_len)) { SYSCALL_DEFINE5()
538 if (vma_adjust(vma, vma->vm_start, addr + new_len, SYSCALL_DEFINE5()
539 vma->vm_pgoff, NULL)) { SYSCALL_DEFINE5()
544 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); SYSCALL_DEFINE5()
545 if (vma->vm_flags & VM_LOCKED) { SYSCALL_DEFINE5()
562 if (vma->vm_flags & VM_MAYSHARE) SYSCALL_DEFINE5()
565 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, SYSCALL_DEFINE5()
566 vma->vm_pgoff + SYSCALL_DEFINE5()
567 ((addr - vma->vm_start) >> PAGE_SHIFT), SYSCALL_DEFINE5()
574 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); SYSCALL_DEFINE5()
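The mremap.c entries above cover both in-place growth (vma_expandable()/vma_adjust()) and relocation (move_vma()/move_page_tables()). A minimal userspace example of the mremap(2) call that reaches this code; whether the mapping actually moves depends on the surrounding address space:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old=%p new=%p (moved: %s)\n", (void *)p, (void *)q,
	       q == p ? "no" : "yes");
	munmap(q, 4 * page);
	return 0;
}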
pgtable-generic.c
47 int ptep_set_access_flags(struct vm_area_struct *vma, ptep_set_access_flags() argument
53 set_pte_at(vma->vm_mm, address, ptep, entry); ptep_set_access_flags()
54 flush_tlb_fix_spurious_fault(vma, address); ptep_set_access_flags()
61 int ptep_clear_flush_young(struct vm_area_struct *vma, ptep_clear_flush_young() argument
65 young = ptep_test_and_clear_young(vma, address, ptep); ptep_clear_flush_young()
67 flush_tlb_page(vma, address); ptep_clear_flush_young()
73 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, ptep_clear_flush() argument
76 struct mm_struct *mm = (vma)->vm_mm; ptep_clear_flush()
80 flush_tlb_page(vma, address); ptep_clear_flush()
98 #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
102 int pmdp_set_access_flags(struct vm_area_struct *vma, pmdp_set_access_flags() argument
109 set_pmd_at(vma->vm_mm, address, pmdp, entry); pmdp_set_access_flags()
110 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_set_access_flags()
117 int pmdp_clear_flush_young(struct vm_area_struct *vma, pmdp_clear_flush_young() argument
122 young = pmdp_test_and_clear_young(vma, address, pmdp); pmdp_clear_flush_young()
124 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_clear_flush_young()
130 pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmdp_huge_clear_flush() argument
136 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); pmdp_huge_clear_flush()
137 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_huge_clear_flush()
143 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmdp_splitting_flush() argument
148 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
150 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_splitting_flush()
191 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmdp_invalidate() argument
195 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); pmdp_invalidate()
196 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_invalidate()
201 pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmdp_collapse_flush() argument
212 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); pmdp_collapse_flush()
215 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_collapse_flush()
pagewalk.c
38 if (pmd_none(*pmd) || !walk->vma) { walk_pmd_range()
135 struct vm_area_struct *vma = walk->vma; walk_hugetlb_range() local
136 struct hstate *h = hstate_vma(vma); walk_hugetlb_range()
164 * Decide whether we really walk over the current vma on [@start, @end)
166 * current vma, and return 1 if we skip the vma. Negative values means
172 struct vm_area_struct *vma = walk->vma; walk_page_test() local
178 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP walk_page_test()
183 * vma(VM_PFNMAP). walk_page_test()
185 if (vma->vm_flags & VM_PFNMAP) { walk_page_test()
198 struct vm_area_struct *vma = walk->vma; __walk_page_range() local
200 if (vma && is_vm_hugetlb_page(vma)) { __walk_page_range()
226 * they really want to walk over the current vma, typically by checking
230 * struct mm_walk keeps current values of some common data like vma and pmd,
236 * @walk->mm->mmap_sem, because these function traverse vma list and/or
237 * access to vma's data.
244 struct vm_area_struct *vma; walk_page_range() local
254 vma = find_vma(walk->mm, start); walk_page_range()
256 if (!vma) { /* after the last vma */ walk_page_range()
257 walk->vma = NULL; walk_page_range()
259 } else if (start < vma->vm_start) { /* outside vma */ walk_page_range()
260 walk->vma = NULL; walk_page_range()
261 next = min(end, vma->vm_start); walk_page_range()
262 } else { /* inside vma */ walk_page_range()
263 walk->vma = vma; walk_page_range()
264 next = min(end, vma->vm_end); walk_page_range()
265 vma = vma->vm_next; walk_page_range()
280 if (walk->vma || walk->pte_hole) walk_page_range()
288 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk) walk_page_vma() argument
296 VM_BUG_ON(!vma); walk_page_vma()
297 walk->vma = vma; walk_page_vma()
298 err = walk_page_test(vma->vm_start, vma->vm_end, walk); walk_page_vma()
303 return __walk_page_range(vma->vm_start, vma->vm_end, walk); walk_page_vma()
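The comments above describe the callback-driven walker in mm/pagewalk.c: the caller fills a struct mm_walk, holds mmap_sem, and walk_page_range() visits each vma in the range. A sketch of such a caller, loosely modeled on force_swapin_readahead() in madvise.c below; the count_present_pmds() callback is hypothetical:

#include <linux/mm.h>

static int count_present_pmds(pmd_t *pmd, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;	/* caller-supplied cookie */

	if (!pmd_none(*pmd))
		(*count)++;
	return 0;				/* non-zero would stop the walk */
}

static unsigned long count_vma_pmds(struct vm_area_struct *vma)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pmd_entry = count_present_pmds,
		.mm = vma->vm_mm,
		.private = &count,
	};

	walk_page_range(vma->vm_start, vma->vm_end, &walk);	/* mmap_sem held by caller */
	return count;
}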
madvise.c
25 * Any behaviour which results in changes to the vma->vm_flags needs to
46 static long madvise_behavior(struct vm_area_struct *vma, madvise_behavior() argument
50 struct mm_struct *mm = vma->vm_mm; madvise_behavior()
53 unsigned long new_flags = vma->vm_flags; madvise_behavior()
69 if (vma->vm_flags & VM_IO) { madvise_behavior()
87 error = ksm_madvise(vma, start, end, behavior, &new_flags); madvise_behavior()
93 error = hugepage_madvise(vma, &new_flags, behavior); madvise_behavior()
99 if (new_flags == vma->vm_flags) { madvise_behavior()
100 *prev = vma; madvise_behavior()
104 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); madvise_behavior()
105 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, madvise_behavior()
106 vma->vm_file, pgoff, vma_policy(vma), madvise_behavior()
107 vma->vm_userfaultfd_ctx); madvise_behavior()
109 vma = *prev; madvise_behavior()
113 *prev = vma; madvise_behavior()
115 if (start != vma->vm_start) { madvise_behavior()
116 error = split_vma(mm, vma, start, 1); madvise_behavior()
121 if (end != vma->vm_end) { madvise_behavior()
122 error = split_vma(mm, vma, end, 0); madvise_behavior()
131 vma->vm_flags = new_flags; madvise_behavior()
144 struct vm_area_struct *vma = walk->private; swapin_walk_pmd_entry() local
156 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); swapin_walk_pmd_entry()
167 vma, index); swapin_walk_pmd_entry()
175 static void force_swapin_readahead(struct vm_area_struct *vma, force_swapin_readahead() argument
179 .mm = vma->vm_mm, force_swapin_readahead()
181 .private = vma, force_swapin_readahead()
189 static void force_shm_swapin_readahead(struct vm_area_struct *vma, force_shm_swapin_readahead() argument
198 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; force_shm_swapin_readahead()
220 static long madvise_willneed(struct vm_area_struct *vma, madvise_willneed() argument
224 struct file *file = vma->vm_file; madvise_willneed()
228 *prev = vma; madvise_willneed()
229 force_swapin_readahead(vma, start, end); madvise_willneed()
234 *prev = vma; madvise_willneed()
235 force_shm_swapin_readahead(vma, start, end, madvise_willneed()
249 *prev = vma; madvise_willneed()
250 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; madvise_willneed()
251 if (end > vma->vm_end) madvise_willneed()
252 end = vma->vm_end; madvise_willneed()
253 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; madvise_willneed()
278 static long madvise_dontneed(struct vm_area_struct *vma, madvise_dontneed() argument
282 *prev = vma; madvise_dontneed()
283 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) madvise_dontneed()
286 zap_page_range(vma, start, end - start, NULL); madvise_dontneed()
294 static long madvise_remove(struct vm_area_struct *vma, madvise_remove() argument
304 if (vma->vm_flags & VM_LOCKED) madvise_remove()
307 f = vma->vm_file; madvise_remove()
313 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) madvise_remove()
316 offset = (loff_t)(start - vma->vm_start) madvise_remove()
317 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); madvise_remove()
321 * explicitly grab a reference because the vma (and hence the madvise_remove()
322 * vma's reference to the file) can go away as soon as we drop madvise_remove()
374 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, madvise_vma() argument
379 return madvise_remove(vma, prev, start, end); madvise_vma()
381 return madvise_willneed(vma, prev, start, end); madvise_vma()
383 return madvise_dontneed(vma, prev, start, end); madvise_vma()
385 return madvise_behavior(vma, prev, start, end, behavior); madvise_vma()
463 struct vm_area_struct *vma, *prev; SYSCALL_DEFINE3() local
504 vma = find_vma_prev(current->mm, start, &prev); SYSCALL_DEFINE3()
505 if (vma && start > vma->vm_start) SYSCALL_DEFINE3()
506 prev = vma; SYSCALL_DEFINE3()
512 if (!vma) SYSCALL_DEFINE3()
515 /* Here start < (end|vma->vm_end). */ SYSCALL_DEFINE3()
516 if (start < vma->vm_start) { SYSCALL_DEFINE3()
518 start = vma->vm_start; SYSCALL_DEFINE3()
523 /* Here vma->vm_start <= start < (end|vma->vm_end) */ SYSCALL_DEFINE3()
524 tmp = vma->vm_end; SYSCALL_DEFINE3()
528 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ SYSCALL_DEFINE3()
529 error = madvise_vma(vma, &prev, start, tmp, behavior); SYSCALL_DEFINE3()
539 vma = prev->vm_next; SYSCALL_DEFINE3()
541 vma = find_vma(current->mm, start); SYSCALL_DEFINE3()
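madvise_vma() above dispatches each behaviour to its handler; MADV_DONTNEED ends up in madvise_dontneed()/zap_page_range(). A short userspace example of that case: anonymous private memory is simply discarded and refaults as zeroes:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xaa, page);
	if (madvise(p, page, MADV_DONTNEED) != 0) {
		perror("madvise");
		return 1;
	}
	printf("first byte after MADV_DONTNEED: %d\n", p[0]);	/* prints 0 */
	munmap(p, page);
	return 0;
}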
mmap.c
62 struct vm_area_struct *vma, struct vm_area_struct *prev,
98 /* Update vma->vm_page_prot to reflect vma->vm_flags. */ vma_set_page_prot()
99 void vma_set_page_prot(struct vm_area_struct *vma) vma_set_page_prot() argument
101 unsigned long vm_flags = vma->vm_flags; vma_set_page_prot()
103 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); vma_set_page_prot()
104 if (vma_wants_writenotify(vma)) { vma_set_page_prot()
106 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vma_set_page_prot()
238 static void __remove_shared_vm_struct(struct vm_area_struct *vma, __remove_shared_vm_struct() argument
241 if (vma->vm_flags & VM_DENYWRITE) __remove_shared_vm_struct()
243 if (vma->vm_flags & VM_SHARED) __remove_shared_vm_struct()
247 vma_interval_tree_remove(vma, &mapping->i_mmap); __remove_shared_vm_struct()
253 * vma from rmap and vmtruncate before freeing its page tables.
255 void unlink_file_vma(struct vm_area_struct *vma) unlink_file_vma() argument
257 struct file *file = vma->vm_file; unlink_file_vma()
262 __remove_shared_vm_struct(vma, file, mapping); unlink_file_vma()
270 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) remove_vma() argument
272 struct vm_area_struct *next = vma->vm_next; remove_vma()
275 if (vma->vm_ops && vma->vm_ops->close) remove_vma()
276 vma->vm_ops->close(vma); remove_vma()
277 if (vma->vm_file) remove_vma()
278 fput(vma->vm_file); remove_vma()
279 mpol_put(vma_policy(vma)); remove_vma()
280 kmem_cache_free(vm_area_cachep, vma); remove_vma()
356 static long vma_compute_subtree_gap(struct vm_area_struct *vma) vma_compute_subtree_gap() argument
359 max = vma->vm_start; vma_compute_subtree_gap()
360 if (vma->vm_prev) vma_compute_subtree_gap()
361 max -= vma->vm_prev->vm_end; vma_compute_subtree_gap()
362 if (vma->vm_rb.rb_left) { vma_compute_subtree_gap()
363 subtree_gap = rb_entry(vma->vm_rb.rb_left, vma_compute_subtree_gap()
368 if (vma->vm_rb.rb_right) { vma_compute_subtree_gap()
369 subtree_gap = rb_entry(vma->vm_rb.rb_right, vma_compute_subtree_gap()
385 struct vm_area_struct *vma; browse_rb() local
386 vma = rb_entry(nd, struct vm_area_struct, vm_rb); browse_rb()
387 if (vma->vm_start < prev) { browse_rb()
389 vma->vm_start, prev); browse_rb()
392 if (vma->vm_start < pend) { browse_rb()
394 vma->vm_start, pend); browse_rb()
397 if (vma->vm_start > vma->vm_end) { browse_rb()
399 vma->vm_start, vma->vm_end); browse_rb()
402 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { browse_rb()
404 vma->rb_subtree_gap, browse_rb()
405 vma_compute_subtree_gap(vma)); browse_rb()
410 prev = vma->vm_start; browse_rb()
411 pend = vma->vm_end; browse_rb()
428 struct vm_area_struct *vma; validate_mm_rb() local
429 vma = rb_entry(nd, struct vm_area_struct, vm_rb); validate_mm_rb()
430 VM_BUG_ON_VMA(vma != ignore && validate_mm_rb()
431 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), validate_mm_rb()
432 vma); validate_mm_rb()
441 struct vm_area_struct *vma = mm->mmap; validate_mm() local
443 while (vma) { validate_mm()
444 struct anon_vma *anon_vma = vma->anon_vma; validate_mm()
449 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) validate_mm()
454 highest_address = vma->vm_end; validate_mm()
455 vma = vma->vm_next; validate_mm()
484 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
485 * vma->vm_prev->vm_end values changed, without modifying the vma's position
488 static void vma_gap_update(struct vm_area_struct *vma) vma_gap_update() argument
494 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); vma_gap_update()
497 static inline void vma_rb_insert(struct vm_area_struct *vma, vma_rb_insert() argument
503 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); vma_rb_insert()
506 static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) vma_rb_erase() argument
510 * with the possible exception of the vma being erased. vma_rb_erase()
512 validate_mm_rb(root, vma); vma_rb_erase()
519 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); vma_rb_erase()
523 * vma has some anon_vma assigned, and is already inserted on that
526 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
527 * vma must be removed from the anon_vma's interval trees using
530 * After the update, the vma will be reinserted using
537 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_pre_update_vma() argument
541 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_pre_update_vma()
546 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_post_update_vma() argument
550 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_post_update_vma()
570 /* Fail if an existing vma overlaps the area */ find_vma_links()
592 struct vm_area_struct *vma; count_vma_pages_range() local
595 vma = find_vma_intersection(mm, addr, end); count_vma_pages_range()
596 if (!vma) count_vma_pages_range()
599 nr_pages = (min(end, vma->vm_end) - count_vma_pages_range()
600 max(addr, vma->vm_start)) >> PAGE_SHIFT; count_vma_pages_range()
603 for (vma = vma->vm_next; vma; vma = vma->vm_next) { count_vma_pages_range()
606 if (vma->vm_start > end) count_vma_pages_range()
609 overlap_len = min(end, vma->vm_end) - vma->vm_start; count_vma_pages_range()
616 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, __vma_link_rb() argument
619 /* Update tracking information for the gap following the new vma. */ __vma_link_rb()
620 if (vma->vm_next) __vma_link_rb()
621 vma_gap_update(vma->vm_next); __vma_link_rb()
623 mm->highest_vm_end = vma->vm_end; __vma_link_rb()
626 * vma->vm_prev wasn't known when we followed the rbtree to find the __vma_link_rb()
627 * correct insertion point for that vma. As a result, we could not __vma_link_rb()
628 * update the vma vm_rb parents rb_subtree_gap values on the way down. __vma_link_rb()
629 * So, we first insert the vma with a zero rb_subtree_gap value __vma_link_rb()
634 rb_link_node(&vma->vm_rb, rb_parent, rb_link); __vma_link_rb()
635 vma->rb_subtree_gap = 0; __vma_link_rb()
636 vma_gap_update(vma); __vma_link_rb()
637 vma_rb_insert(vma, &mm->mm_rb); __vma_link_rb()
640 static void __vma_link_file(struct vm_area_struct *vma) __vma_link_file() argument
644 file = vma->vm_file; __vma_link_file()
648 if (vma->vm_flags & VM_DENYWRITE) __vma_link_file()
650 if (vma->vm_flags & VM_SHARED) __vma_link_file()
654 vma_interval_tree_insert(vma, &mapping->i_mmap); __vma_link_file()
660 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, __vma_link() argument
664 __vma_link_list(mm, vma, prev, rb_parent); __vma_link()
665 __vma_link_rb(mm, vma, rb_link, rb_parent); __vma_link()
668 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, vma_link() argument
674 if (vma->vm_file) { vma_link()
675 mapping = vma->vm_file->f_mapping; vma_link()
679 __vma_link(mm, vma, prev, rb_link, rb_parent); vma_link()
680 __vma_link_file(vma); vma_link()
690 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
693 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) __insert_vm_struct() argument
698 if (find_vma_links(mm, vma->vm_start, vma->vm_end, __insert_vm_struct()
701 __vma_link(mm, vma, prev, rb_link, rb_parent); __insert_vm_struct()
706 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, __vma_unlink() argument
711 vma_rb_erase(vma, &mm->mm_rb); __vma_unlink()
712 prev->vm_next = next = vma->vm_next; __vma_unlink()
721 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
724 * are necessary. The "insert" vma (if any) is to be inserted
727 int vma_adjust(struct vm_area_struct *vma, unsigned long start, vma_adjust() argument
730 struct mm_struct *mm = vma->vm_mm; vma_adjust()
731 struct vm_area_struct *next = vma->vm_next; vma_adjust()
736 struct file *file = vma->vm_file; vma_adjust()
746 * vma expands, overlapping all the next, and vma_adjust()
752 importer = vma; vma_adjust()
755 * vma expands, overlapping part of the next: vma_adjust()
760 importer = vma; vma_adjust()
761 } else if (end < vma->vm_end) { vma_adjust()
763 * vma shrinks, and !insert tells it's not vma_adjust()
767 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); vma_adjust()
768 exporter = vma; vma_adjust()
774 * make sure the expanding vma has anon_vma set if the vma_adjust()
775 * shrinking vma had, to cover any anon pages imported. vma_adjust()
790 uprobe_munmap(vma, vma->vm_start, vma->vm_end); vma_adjust()
801 * space until vma start or end is updated. vma_adjust()
807 vma_adjust_trans_huge(vma, start, end, adjust_next); vma_adjust()
809 anon_vma = vma->anon_vma; vma_adjust()
816 anon_vma_interval_tree_pre_update_vma(vma); vma_adjust()
823 vma_interval_tree_remove(vma, root); vma_adjust()
828 if (start != vma->vm_start) { vma_adjust()
829 vma->vm_start = start; vma_adjust()
832 if (end != vma->vm_end) { vma_adjust()
833 vma->vm_end = end; vma_adjust()
836 vma->vm_pgoff = pgoff; vma_adjust()
845 vma_interval_tree_insert(vma, root); vma_adjust()
851 * vma_merge has merged next into vma, and needs vma_adjust()
854 __vma_unlink(mm, next, vma); vma_adjust()
859 * split_vma has split insert from vma, and needs vma_adjust()
861 * (it may either follow vma or precede it). vma_adjust()
866 vma_gap_update(vma); vma_adjust()
876 anon_vma_interval_tree_post_update_vma(vma); vma_adjust()
885 uprobe_mmap(vma); vma_adjust()
897 anon_vma_merge(vma, next); vma_adjust()
906 next = vma->vm_next; vma_adjust()
923 * If the vma has a ->close operation then the driver probably needs to release
924 * per-vma resources, so we don't attempt to merge those.
926 static inline int is_mergeable_vma(struct vm_area_struct *vma, is_mergeable_vma() argument
938 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) is_mergeable_vma()
940 if (vma->vm_file != file) is_mergeable_vma()
942 if (vma->vm_ops && vma->vm_ops->close) is_mergeable_vma()
944 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) is_mergeable_vma()
951 struct vm_area_struct *vma) is_mergeable_anon_vma()
957 if ((!anon_vma1 || !anon_vma2) && (!vma || is_mergeable_anon_vma()
958 list_is_singular(&vma->anon_vma_chain))) is_mergeable_anon_vma()
965 * in front of (at a lower virtual address and file offset than) the vma.
975 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, can_vma_merge_before() argument
980 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && can_vma_merge_before()
981 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_before()
982 if (vma->vm_pgoff == vm_pgoff) can_vma_merge_before()
990 * beyond (at a higher virtual address and file offset than) the vma.
996 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, can_vma_merge_after() argument
1001 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) && can_vma_merge_after()
1002 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_after()
1004 vm_pglen = vma_pages(vma); can_vma_merge_after()
1005 if (vma->vm_pgoff + vm_pglen == vm_pgoff) can_vma_merge_after()
1025 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1052 * We later require that vma->vm_flags == vm_flags, vma_merge()
1053 * so this tests vma->vm_flags & VM_SPECIAL, too. vma_merge()
1128 * we can merge the two vma's. For example, we refuse to merge a vma if
1144 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1160 * We also make sure that the two vma's are compatible (adjacent,
1180 * anon_vmas being allocated, preventing vma merge in subsequent
1183 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) find_mergeable_anon_vma() argument
1188 near = vma->vm_next; find_mergeable_anon_vma()
1192 anon_vma = reusable_anon_vma(near, vma, near); find_mergeable_anon_vma()
1196 near = vma->vm_prev; find_mergeable_anon_vma()
1200 anon_vma = reusable_anon_vma(near, near, vma); find_mergeable_anon_vma()
1493 int vma_wants_writenotify(struct vm_area_struct *vma) vma_wants_writenotify() argument
1495 vm_flags_t vm_flags = vma->vm_flags; vma_wants_writenotify()
1496 const struct vm_operations_struct *vm_ops = vma->vm_ops; vma_wants_writenotify()
1508 if (pgprot_val(vma->vm_page_prot) != vma_wants_writenotify()
1509 pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags))) vma_wants_writenotify()
1521 return vma->vm_file && vma->vm_file->f_mapping && vma_wants_writenotify()
1522 mapping_cap_account_dirty(vma->vm_file->f_mapping); vma_wants_writenotify()
1545 struct vm_area_struct *vma, *prev; mmap_region() local
1587 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, mmap_region()
1589 if (vma) mmap_region()
1597 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); mmap_region()
1598 if (!vma) { mmap_region()
1603 vma->vm_mm = mm; mmap_region()
1604 vma->vm_start = addr; mmap_region()
1605 vma->vm_end = addr + len; mmap_region()
1606 vma->vm_flags = vm_flags; mmap_region()
1607 vma->vm_page_prot = vm_get_page_prot(vm_flags); mmap_region()
1608 vma->vm_pgoff = pgoff; mmap_region()
1609 INIT_LIST_HEAD(&vma->anon_vma_chain); mmap_region()
1623 /* ->mmap() can change vma->vm_file, but must guarantee that mmap_region()
1628 vma->vm_file = get_file(file); mmap_region()
1629 error = file->f_op->mmap(file, vma); mmap_region()
1640 WARN_ON_ONCE(addr != vma->vm_start); mmap_region()
1642 addr = vma->vm_start; mmap_region()
1643 vm_flags = vma->vm_flags; mmap_region()
1645 error = shmem_zero_setup(vma); mmap_region()
1650 vma_link(mm, vma, prev, rb_link, rb_parent); mmap_region()
1651 /* Once vma denies write, undo our temporary denial count */ mmap_region()
1658 file = vma->vm_file; mmap_region()
1660 perf_event_mmap(vma); mmap_region()
1664 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || mmap_region()
1665 vma == get_gate_vma(current->mm))) mmap_region()
1668 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; mmap_region()
1672 uprobe_mmap(vma); mmap_region()
1675 * New (or expanded) vma always get soft dirty status. mmap_region()
1677 * be able to distinguish situation when vma area unmapped, mmap_region()
1681 vma->vm_flags |= VM_SOFTDIRTY; mmap_region()
1683 vma_set_page_prot(vma); mmap_region()
1688 vma->vm_file = NULL; mmap_region()
1692 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); mmap_region()
1700 kmem_cache_free(vm_area_cachep, vma); mmap_region()
1712 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; unmapped_area()
1713 * - gap_end = vma->vm_start >= info->low_limit + length; unmapped_area()
1718 struct vm_area_struct *vma; unmapped_area() local
1738 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); unmapped_area()
1739 if (vma->rb_subtree_gap < length) unmapped_area()
1744 gap_end = vma->vm_start; unmapped_area()
1745 if (gap_end >= low_limit && vma->vm_rb.rb_left) { unmapped_area()
1747 rb_entry(vma->vm_rb.rb_left, unmapped_area()
1750 vma = left; unmapped_area()
1755 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; unmapped_area()
1764 if (vma->vm_rb.rb_right) { unmapped_area()
1766 rb_entry(vma->vm_rb.rb_right, unmapped_area()
1769 vma = right; unmapped_area()
1776 struct rb_node *prev = &vma->vm_rb; unmapped_area()
1779 vma = rb_entry(rb_parent(prev), unmapped_area()
1781 if (prev == vma->vm_rb.rb_left) { unmapped_area()
1782 gap_start = vma->vm_prev->vm_end; unmapped_area()
1783 gap_end = vma->vm_start; unmapped_area()
1812 struct vm_area_struct *vma; unmapped_area_topdown() local
1841 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); unmapped_area_topdown()
1842 if (vma->rb_subtree_gap < length) unmapped_area_topdown()
1847 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; unmapped_area_topdown()
1848 if (gap_start <= high_limit && vma->vm_rb.rb_right) { unmapped_area_topdown()
1850 rb_entry(vma->vm_rb.rb_right, unmapped_area_topdown()
1853 vma = right; unmapped_area_topdown()
1860 gap_end = vma->vm_start; unmapped_area_topdown()
1867 if (vma->vm_rb.rb_left) { unmapped_area_topdown()
1869 rb_entry(vma->vm_rb.rb_left, unmapped_area_topdown()
1872 vma = left; unmapped_area_topdown()
1879 struct rb_node *prev = &vma->vm_rb; unmapped_area_topdown()
1882 vma = rb_entry(rb_parent(prev), unmapped_area_topdown()
1884 if (prev == vma->vm_rb.rb_right) { unmapped_area_topdown()
1885 gap_start = vma->vm_prev ? unmapped_area_topdown()
1886 vma->vm_prev->vm_end : 0; unmapped_area_topdown()
1924 struct vm_area_struct *vma; arch_get_unmapped_area() local
1935 vma = find_vma(mm, addr); arch_get_unmapped_area()
1937 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
1960 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
1975 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
1977 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
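Both arch_get_unmapped_area() variants above are thin wrappers around vm_unmapped_area(), which walks the rb-tree gap sizes maintained by vma_gap_update(). A minimal sketch of that caller pattern, assuming a hypothetical my_get_unmapped_area() with no special alignment needs:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: find a free gap for a bottom-up mapping. */
static unsigned long my_get_unmapped_area(struct file *filp, unsigned long addr,
                                          unsigned long len, unsigned long pgoff,
                                          unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;

        if (len > TASK_SIZE)
                return -ENOMEM;

        /* Honour a caller-supplied hint if that range is free. */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* Otherwise walk the rb-tree gaps bottom-up. */
        info.flags = 0;         /* VM_UNMAPPED_AREA_TOPDOWN selects the other walk */
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}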
2044 struct vm_area_struct *vma; find_vma() local
2047 vma = vmacache_find(mm, addr); find_vma()
2048 if (likely(vma)) find_vma()
2049 return vma; find_vma()
2059 vma = tmp; find_vma()
2067 if (vma) find_vma()
2068 vmacache_update(addr, vma); find_vma()
2069 return vma; find_vma()
2081 struct vm_area_struct *vma; find_vma_prev() local
2083 vma = find_vma(mm, addr); find_vma_prev()
2084 if (vma) { find_vma_prev()
2085 *pprev = vma->vm_prev; find_vma_prev()
2094 return vma; find_vma_prev()
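Note that find_vma() only guarantees addr < vma->vm_end; the returned vma may lie entirely above addr. A minimal caller-side sketch, under mmap_sem, with my_addr_is_mapped() as a hypothetical helper:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: does any vma in @mm cover @addr? */
static bool my_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);       /* keep find_vma() stable against munmap */
        vma = find_vma(mm, addr);
        /* find_vma() only guarantees addr < vma->vm_end; check vm_start too. */
        mapped = vma && vma->vm_start <= addr;
        up_read(&mm->mmap_sem);

        return mapped;
}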
2102 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) acct_stack_growth() argument
2104 struct mm_struct *mm = vma->vm_mm; acct_stack_growth()
2114 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) acct_stack_growth()
2120 if (vma->vm_flags & VM_LOCKED) { acct_stack_growth()
2131 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : acct_stack_growth()
2132 vma->vm_end - size; acct_stack_growth()
2133 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) acct_stack_growth()
2149 * vma is the last one with address > vma->vm_end. Have to extend vma.
2151 int expand_upwards(struct vm_area_struct *vma, unsigned long address) expand_upwards() argument
2153 struct mm_struct *mm = vma->vm_mm; expand_upwards()
2156 if (!(vma->vm_flags & VM_GROWSUP)) expand_upwards()
2166 if (unlikely(anon_vma_prepare(vma))) expand_upwards()
2170 * vma->vm_start/vm_end cannot change under us because the caller expand_upwards()
2174 anon_vma_lock_write(vma->anon_vma); expand_upwards()
2177 if (address > vma->vm_end) { expand_upwards()
2180 size = address - vma->vm_start; expand_upwards()
2181 grow = (address - vma->vm_end) >> PAGE_SHIFT; expand_upwards()
2184 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { expand_upwards()
2185 error = acct_stack_growth(vma, size, grow); expand_upwards()
2191 * concurrent vma expansions. expand_upwards()
2194 * in a mm share the same root anon vma. expand_upwards()
2196 * against concurrent vma expansions. expand_upwards()
2199 if (vma->vm_flags & VM_LOCKED) expand_upwards()
2201 vm_stat_account(mm, vma->vm_flags, expand_upwards()
2202 vma->vm_file, grow); expand_upwards()
2203 anon_vma_interval_tree_pre_update_vma(vma); expand_upwards()
2204 vma->vm_end = address; expand_upwards()
2205 anon_vma_interval_tree_post_update_vma(vma); expand_upwards()
2206 if (vma->vm_next) expand_upwards()
2207 vma_gap_update(vma->vm_next); expand_upwards()
2212 perf_event_mmap(vma); expand_upwards()
2216 anon_vma_unlock_write(vma->anon_vma); expand_upwards()
2217 khugepaged_enter_vma_merge(vma, vma->vm_flags); expand_upwards()
2224 * vma is the first one with address < vma->vm_start. Have to extend vma.
2226 int expand_downwards(struct vm_area_struct *vma, expand_downwards() argument
2229 struct mm_struct *mm = vma->vm_mm; expand_downwards()
2238 if (unlikely(anon_vma_prepare(vma))) expand_downwards()
2242 * vma->vm_start/vm_end cannot change under us because the caller expand_downwards()
2246 anon_vma_lock_write(vma->anon_vma); expand_downwards()
2249 if (address < vma->vm_start) { expand_downwards()
2252 size = vma->vm_end - address; expand_downwards()
2253 grow = (vma->vm_start - address) >> PAGE_SHIFT; expand_downwards()
2256 if (grow <= vma->vm_pgoff) { expand_downwards()
2257 error = acct_stack_growth(vma, size, grow); expand_downwards()
2263 * concurrent vma expansions. expand_downwards()
2266 * in a mm share the same root anon vma. expand_downwards()
2268 * against concurrent vma expansions. expand_downwards()
2271 if (vma->vm_flags & VM_LOCKED) expand_downwards()
2273 vm_stat_account(mm, vma->vm_flags, expand_downwards()
2274 vma->vm_file, grow); expand_downwards()
2275 anon_vma_interval_tree_pre_update_vma(vma); expand_downwards()
2276 vma->vm_start = address; expand_downwards()
2277 vma->vm_pgoff -= grow; expand_downwards()
2278 anon_vma_interval_tree_post_update_vma(vma); expand_downwards()
2279 vma_gap_update(vma); expand_downwards()
2282 perf_event_mmap(vma); expand_downwards()
2286 anon_vma_unlock_write(vma->anon_vma); expand_downwards()
2287 khugepaged_enter_vma_merge(vma, vma->vm_flags); expand_downwards()
2304 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
2309 next = vma->vm_next; expand_stack()
2314 return expand_upwards(vma, address); expand_stack()
2320 struct vm_area_struct *vma, *prev; find_extend_vma() local
2323 vma = find_vma_prev(mm, addr, &prev); find_extend_vma()
2324 if (vma && (vma->vm_start <= addr)) find_extend_vma()
2325 return vma; find_extend_vma()
2333 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
2338 prev = vma->vm_prev; expand_stack()
2343 return expand_downwards(vma, address); expand_stack()
2349 struct vm_area_struct *vma; find_extend_vma() local
2353 vma = find_vma(mm, addr); find_extend_vma()
2354 if (!vma) find_extend_vma()
2356 if (vma->vm_start <= addr) find_extend_vma()
2357 return vma; find_extend_vma()
2358 if (!(vma->vm_flags & VM_GROWSDOWN)) find_extend_vma()
2360 start = vma->vm_start; find_extend_vma()
2361 if (expand_stack(vma, addr)) find_extend_vma()
2363 if (vma->vm_flags & VM_LOCKED) find_extend_vma()
2364 populate_vma_page_range(vma, addr, start, NULL); find_extend_vma()
2365 return vma; find_extend_vma()
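find_extend_vma() and expand_stack() are what architecture page-fault handlers use to grow a VM_GROWSDOWN stack on demand. A condensed sketch of that classic lookup, folded into a hypothetical my_fault_vma() helper (caller assumed to hold mmap_sem for read):

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical condensation of an arch fault handler's vma lookup:
 * return the vma covering @address, growing the stack if allowed,
 * or NULL if the access is bogus.  Caller holds mm->mmap_sem for read.
 */
static struct vm_area_struct *my_fault_vma(struct mm_struct *mm,
                                           unsigned long address)
{
        struct vm_area_struct *vma = find_vma(mm, address);

        if (!vma)
                return NULL;                    /* above every mapping */
        if (vma->vm_start <= address)
                return vma;                     /* already inside this vma */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;                    /* gap below a non-stack vma */
        if (expand_stack(vma, address))
                return NULL;                    /* growth refused (rlimit, guard gap, ...) */
        return vma;                             /* vm_start now reaches down to address */
}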
2372 * Ok - we have the memory areas we should free on the vma list,
2373 * so release them, and do the vma updates.
2377 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) remove_vma_list() argument
2384 long nrpages = vma_pages(vma); remove_vma_list()
2386 if (vma->vm_flags & VM_ACCOUNT) remove_vma_list()
2388 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); remove_vma_list()
2389 vma = remove_vma(vma); remove_vma_list()
2390 } while (vma); remove_vma_list()
2401 struct vm_area_struct *vma, struct vm_area_struct *prev, unmap_region()
2410 unmap_vmas(&tlb, vma, start, end); unmap_region()
2411 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, unmap_region()
2417 * Create a list of vma's touched by the unmap, removing them from the mm's
2418 * vma list as we go..
2421 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, detach_vmas_to_be_unmapped() argument
2428 vma->vm_prev = NULL; detach_vmas_to_be_unmapped()
2430 vma_rb_erase(vma, &mm->mm_rb); detach_vmas_to_be_unmapped()
2432 tail_vma = vma; detach_vmas_to_be_unmapped()
2433 vma = vma->vm_next; detach_vmas_to_be_unmapped()
2434 } while (vma && vma->vm_start < end); detach_vmas_to_be_unmapped()
2435 *insertion_point = vma; detach_vmas_to_be_unmapped()
2436 if (vma) { detach_vmas_to_be_unmapped()
2437 vma->vm_prev = prev; detach_vmas_to_be_unmapped()
2438 vma_gap_update(vma); detach_vmas_to_be_unmapped()
2451 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, __split_vma() argument
2457 if (is_vm_hugetlb_page(vma) && (addr & __split_vma()
2458 ~(huge_page_mask(hstate_vma(vma))))) __split_vma()
2466 *new = *vma; __split_vma()
2474 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); __split_vma()
2477 err = vma_dup_policy(vma, new); __split_vma()
2481 err = anon_vma_clone(new, vma); __split_vma()
2492 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + __split_vma()
2495 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); __split_vma()
2515 * Split a vma into two pieces at address 'addr', a new vma is allocated
2518 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, split_vma() argument
2524 return __split_vma(mm, vma, addr, new_below); split_vma()
2535 struct vm_area_struct *vma, *prev, *last; do_munmap() local
2545 vma = find_vma(mm, start); do_munmap()
2546 if (!vma) do_munmap()
2548 prev = vma->vm_prev; do_munmap()
2549 /* we have start < vma->vm_end */ do_munmap()
2553 if (vma->vm_start >= end) do_munmap()
2557 * If we need to split any vma, do it now to save pain later. do_munmap()
2561 * places tmp vma above, and higher split_vma places tmp vma below. do_munmap()
2563 if (start > vma->vm_start) { do_munmap()
2571 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) do_munmap()
2574 error = __split_vma(mm, vma, start, 0); do_munmap()
2577 prev = vma; do_munmap()
2587 vma = prev ? prev->vm_next : mm->mmap; do_munmap()
2593 struct vm_area_struct *tmp = vma; do_munmap()
2604 * Remove the vma's, and unmap the actual pages do_munmap()
2606 detach_vmas_to_be_unmapped(mm, vma, prev, end); do_munmap()
2607 unmap_region(mm, vma, prev, start, end); do_munmap()
2609 arch_unmap(mm, vma, start, end); do_munmap()
2612 remove_vma_list(mm, vma); do_munmap()
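do_munmap() itself is an internal helper; kernel code that maps or unmaps user address space on a task's behalf normally goes through the vm_mmap()/vm_munmap() wrappers, which take mmap_sem themselves. A rough sketch with hypothetical helper names:

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>

/* Hypothetical: map the first @len bytes of @file read-only into current->mm. */
static unsigned long my_map_for_user(struct file *file, unsigned long len)
{
        /* vm_mmap() takes mmap_sem itself and returns the address or -errno. */
        return vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
}

/* Hypothetical teardown: vm_munmap() wraps do_munmap() under mmap_sem. */
static int my_unmap_for_user(unsigned long addr, unsigned long len)
{
        return vm_munmap(addr, len);
}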
2644 struct vm_area_struct *vma; SYSCALL_DEFINE5() local
2666 vma = find_vma(mm, start); SYSCALL_DEFINE5()
2668 if (!vma || !(vma->vm_flags & VM_SHARED)) SYSCALL_DEFINE5()
2671 if (start < vma->vm_start) SYSCALL_DEFINE5()
2674 if (start + size > vma->vm_end) { SYSCALL_DEFINE5()
2677 for (next = vma->vm_next; next; next = next->vm_next) { SYSCALL_DEFINE5()
2682 if (next->vm_file != vma->vm_file) SYSCALL_DEFINE5()
2685 if (next->vm_flags != vma->vm_flags) SYSCALL_DEFINE5()
2696 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; SYSCALL_DEFINE5()
2697 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; SYSCALL_DEFINE5()
2698 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; SYSCALL_DEFINE5()
2702 if (vma->vm_flags & VM_LOCKED) { SYSCALL_DEFINE5()
2707 for (tmp = vma; tmp->vm_start >= start + size; SYSCALL_DEFINE5()
2715 file = get_file(vma->vm_file); SYSCALL_DEFINE5()
2716 ret = do_mmap_pgoff(vma->vm_file, start, size, SYSCALL_DEFINE5()
2746 struct vm_area_struct *vma, *prev; do_brk() local
2792 vma = vma_merge(mm, prev, addr, addr + len, flags, do_brk()
2794 if (vma) do_brk()
2798 * create a vma struct for an anonymous mapping do_brk()
2800 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); do_brk()
2801 if (!vma) { do_brk()
2806 INIT_LIST_HEAD(&vma->anon_vma_chain); do_brk()
2807 vma->vm_mm = mm; do_brk()
2808 vma->vm_start = addr; do_brk()
2809 vma->vm_end = addr + len; do_brk()
2810 vma->vm_pgoff = pgoff; do_brk()
2811 vma->vm_flags = flags; do_brk()
2812 vma->vm_page_prot = vm_get_page_prot(flags); do_brk()
2813 vma_link(mm, vma, prev, rb_link, rb_parent); do_brk()
2815 perf_event_mmap(vma); do_brk()
2819 vma->vm_flags |= VM_SOFTDIRTY; do_brk()
2843 struct vm_area_struct *vma; exit_mmap() local
2850 vma = mm->mmap; exit_mmap()
2851 while (vma) { exit_mmap()
2852 if (vma->vm_flags & VM_LOCKED) exit_mmap()
2853 munlock_vma_pages_all(vma); exit_mmap()
2854 vma = vma->vm_next; exit_mmap()
2860 vma = mm->mmap; exit_mmap()
2861 if (!vma) /* Can happen if dup_mmap() received an OOM */ exit_mmap()
2869 unmap_vmas(&tlb, vma, 0, -1); exit_mmap()
2871 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); exit_mmap()
2878 while (vma) { exit_mmap()
2879 if (vma->vm_flags & VM_ACCOUNT) exit_mmap()
2880 nr_accounted += vma_pages(vma); exit_mmap()
2881 vma = remove_vma(vma); exit_mmap()
2890 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) insert_vm_struct() argument
2895 if (find_vma_links(mm, vma->vm_start, vma->vm_end, insert_vm_struct()
2898 if ((vma->vm_flags & VM_ACCOUNT) && insert_vm_struct()
2899 security_vm_enough_memory_mm(mm, vma_pages(vma))) insert_vm_struct()
2903 * The vm_pgoff of a purely anonymous vma should be irrelevant insert_vm_struct()
2910 * vma, merges and splits can happen in a seamless way, just insert_vm_struct()
2914 if (vma_is_anonymous(vma)) { insert_vm_struct()
2915 BUG_ON(vma->anon_vma); insert_vm_struct()
2916 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; insert_vm_struct()
2919 vma_link(mm, vma, prev, rb_link, rb_parent); insert_vm_struct()
2924 * Copy the vma structure to a new location in the same mm,
2931 struct vm_area_struct *vma = *vmap; copy_vma() local
2932 unsigned long vma_start = vma->vm_start; copy_vma()
2933 struct mm_struct *mm = vma->vm_mm; copy_vma()
2939 * If anonymous vma has not yet been faulted, update new pgoff copy_vma()
2942 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { copy_vma()
2949 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, copy_vma()
2950 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), copy_vma()
2951 vma->vm_userfaultfd_ctx); copy_vma()
2954 * Source vma may have been merged into new_vma copy_vma()
2960 * self during an mremap is if the vma hasn't copy_vma()
2962 * reset the dst vma->vm_pgoff to the copy_vma()
2971 *vmap = vma = new_vma; copy_vma()
2973 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); copy_vma()
2978 *new_vma = *vma; copy_vma()
2982 if (vma_dup_policy(vma, new_vma)) copy_vma()
2985 if (anon_vma_clone(new_vma, vma)) copy_vma()
3020 static int special_mapping_fault(struct vm_area_struct *vma,
3024 * Having a close hook prevents vma merging regardless of flags.
3026 static void special_mapping_close(struct vm_area_struct *vma) special_mapping_close() argument
3030 static const char *special_mapping_name(struct vm_area_struct *vma) special_mapping_name() argument
3032 return ((struct vm_special_mapping *)vma->vm_private_data)->name; special_mapping_name()
3046 static int special_mapping_fault(struct vm_area_struct *vma, special_mapping_fault() argument
3052 if (vma->vm_ops == &legacy_special_mapping_vmops) special_mapping_fault()
3053 pages = vma->vm_private_data; special_mapping_fault()
3055 pages = ((struct vm_special_mapping *)vma->vm_private_data)-> special_mapping_fault()
3078 struct vm_area_struct *vma; __install_special_mapping() local
3080 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); __install_special_mapping()
3081 if (unlikely(vma == NULL)) __install_special_mapping()
3084 INIT_LIST_HEAD(&vma->anon_vma_chain); __install_special_mapping()
3085 vma->vm_mm = mm; __install_special_mapping()
3086 vma->vm_start = addr; __install_special_mapping()
3087 vma->vm_end = addr + len; __install_special_mapping()
3089 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; __install_special_mapping()
3090 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); __install_special_mapping()
3092 vma->vm_ops = ops; __install_special_mapping()
3093 vma->vm_private_data = priv; __install_special_mapping()
3095 ret = insert_vm_struct(mm, vma); __install_special_mapping()
3101 perf_event_mmap(vma); __install_special_mapping()
3103 return vma; __install_special_mapping()
3106 kmem_cache_free(vm_area_cachep, vma); __install_special_mapping()
3112 * Insert a new vma covering the given region, with the given flags.
3132 struct vm_area_struct *vma = __install_special_mapping( install_special_mapping() local
3136 return PTR_ERR_OR_ZERO(vma); install_special_mapping()
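install_special_mapping() is how architectures place objects such as the vDSO: a VM_DONTEXPAND vma backed by a fixed, NULL-terminated page array rather than a file. A minimal sketch under assumed names (my_pages filled elsewhere):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Assumed: my_pages[0] is set at init time; my_pages[1] stays NULL (terminator). */
static struct page *my_pages[2];

/* Hypothetical: map that single page read+exec at a kernel-chosen address. */
static int my_install(struct mm_struct *mm)
{
        unsigned long addr;
        int ret;

        down_write(&mm->mmap_sem);
        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto out;
        }
        /* VM_DONTEXPAND and VM_SOFTDIRTY are added internally; pass access bits. */
        ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                      VM_READ | VM_EXEC |
                                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                      my_pages);
out:
        up_write(&mm->mmap_sem);
        return ret;
}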
3151 * anon_vma->root->rwsem. If some other vma in this mm shares vm_lock_anon_vma()
3183 * This operation locks against the VM for all pte/vma/mm related
3193 * altering the vma layout. It's also needed in write mode to avoid new
3201 * vma in this mm is backed by the same anon_vma or address_space.
3215 struct vm_area_struct *vma; mm_take_all_locks() local
3222 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_take_all_locks()
3225 if (vma->vm_file && vma->vm_file->f_mapping) mm_take_all_locks()
3226 vm_lock_mapping(mm, vma->vm_file->f_mapping); mm_take_all_locks()
3229 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_take_all_locks()
3232 if (vma->anon_vma) mm_take_all_locks()
3233 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) mm_take_all_locks()
3252 * the vma so the users using the anon_vma->rb_root will vm_unlock_anon_vma()
3286 struct vm_area_struct *vma; mm_drop_all_locks() local
3292 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_drop_all_locks()
3293 if (vma->anon_vma) mm_drop_all_locks()
3294 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) mm_drop_all_locks()
3296 if (vma->vm_file && vma->vm_file->f_mapping) mm_drop_all_locks()
3297 vm_unlock_mapping(vma->vm_file->f_mapping); mm_drop_all_locks()
949 is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) is_mergeable_anon_vma() argument
2400 unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) unmap_region() argument
H A Dnommu.c
121 struct vm_area_struct *vma; kobjsize() local
123 vma = find_vma(current->mm, (unsigned long)objp); kobjsize()
124 if (vma) kobjsize()
125 return vma->vm_end - vma->vm_start; kobjsize()
140 struct vm_area_struct *vma; __get_user_pages() local
153 vma = find_vma(mm, start); __get_user_pages()
154 if (!vma) __get_user_pages()
158 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || __get_user_pages()
159 !(vm_flags & vma->vm_flags)) __get_user_pages()
168 vmas[i] = vma; __get_user_pages()
237 * @vma: memory mapping
245 int follow_pfn(struct vm_area_struct *vma, unsigned long address, follow_pfn() argument
248 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_pfn()
281 struct vm_area_struct *vma; vmalloc_user() local
284 vma = find_vma(current->mm, (unsigned long)ret); vmalloc_user()
285 if (vma) vmalloc_user()
286 vma->vm_flags |= VM_USERMAP; vmalloc_user()
515 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, vm_insert_page() argument
696 * update protection on a vma
698 static void protect_vma(struct vm_area_struct *vma, unsigned long flags) protect_vma() argument
701 struct mm_struct *mm = vma->vm_mm; protect_vma()
702 long start = vma->vm_start & PAGE_MASK; protect_vma()
703 while (start < vma->vm_end) { protect_vma()
717 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) add_vma_to_mm() argument
723 BUG_ON(!vma->vm_region); add_vma_to_mm()
726 vma->vm_mm = mm; add_vma_to_mm()
728 protect_vma(vma, vma->vm_flags); add_vma_to_mm()
731 if (vma->vm_file) { add_vma_to_mm()
732 mapping = vma->vm_file->f_mapping; add_vma_to_mm()
736 vma_interval_tree_insert(vma, &mapping->i_mmap); add_vma_to_mm()
750 if (vma->vm_start < pvma->vm_start) add_vma_to_mm()
752 else if (vma->vm_start > pvma->vm_start) { add_vma_to_mm()
755 } else if (vma->vm_end < pvma->vm_end) add_vma_to_mm()
757 else if (vma->vm_end > pvma->vm_end) { add_vma_to_mm()
760 } else if (vma < pvma) add_vma_to_mm()
762 else if (vma > pvma) { add_vma_to_mm()
769 rb_link_node(&vma->vm_rb, parent, p); add_vma_to_mm()
770 rb_insert_color(&vma->vm_rb, &mm->mm_rb); add_vma_to_mm()
777 __vma_link_list(mm, vma, prev, parent); add_vma_to_mm()
783 static void delete_vma_from_mm(struct vm_area_struct *vma) delete_vma_from_mm() argument
787 struct mm_struct *mm = vma->vm_mm; delete_vma_from_mm()
790 protect_vma(vma, 0); delete_vma_from_mm()
794 /* if the vma is cached, invalidate the entire cache */ delete_vma_from_mm()
795 if (curr->vmacache[i] == vma) { delete_vma_from_mm()
802 if (vma->vm_file) { delete_vma_from_mm()
803 mapping = vma->vm_file->f_mapping; delete_vma_from_mm()
807 vma_interval_tree_remove(vma, &mapping->i_mmap); delete_vma_from_mm()
813 rb_erase(&vma->vm_rb, &mm->mm_rb); delete_vma_from_mm()
815 if (vma->vm_prev) delete_vma_from_mm()
816 vma->vm_prev->vm_next = vma->vm_next; delete_vma_from_mm()
818 mm->mmap = vma->vm_next; delete_vma_from_mm()
820 if (vma->vm_next) delete_vma_from_mm()
821 vma->vm_next->vm_prev = vma->vm_prev; delete_vma_from_mm()
827 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) delete_vma() argument
829 if (vma->vm_ops && vma->vm_ops->close) delete_vma()
830 vma->vm_ops->close(vma); delete_vma()
831 if (vma->vm_file) delete_vma()
832 fput(vma->vm_file); delete_vma()
833 put_nommu_region(vma->vm_region); delete_vma()
834 kmem_cache_free(vm_area_cachep, vma); delete_vma()
843 struct vm_area_struct *vma; find_vma() local
846 vma = vmacache_find(mm, addr); find_vma()
847 if (likely(vma)) find_vma()
848 return vma; find_vma()
852 for (vma = mm->mmap; vma; vma = vma->vm_next) { find_vma()
853 if (vma->vm_start > addr) find_vma()
855 if (vma->vm_end > addr) { find_vma()
856 vmacache_update(addr, vma); find_vma()
857 return vma; find_vma()
878 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
891 struct vm_area_struct *vma; find_vma_exact() local
895 vma = vmacache_find_exact(mm, addr, end); find_vma_exact()
896 if (vma) find_vma_exact()
897 return vma; find_vma_exact()
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { find_vma_exact()
902 if (vma->vm_start < addr) find_vma_exact()
904 if (vma->vm_start > addr) find_vma_exact()
906 if (vma->vm_end == end) { find_vma_exact()
907 vmacache_update(addr, vma); find_vma_exact()
908 return vma; find_vma_exact()
1118 static int do_mmap_shared_file(struct vm_area_struct *vma) do_mmap_shared_file() argument
1122 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); do_mmap_shared_file()
1124 vma->vm_region->vm_top = vma->vm_region->vm_end; do_mmap_shared_file()
1139 static int do_mmap_private(struct vm_area_struct *vma, do_mmap_private() argument
1153 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); do_mmap_private()
1156 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); do_mmap_private()
1157 vma->vm_region->vm_top = vma->vm_region->vm_end; do_mmap_private()
1187 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; do_mmap_private()
1192 vma->vm_start = region->vm_start; do_mmap_private()
1193 vma->vm_end = region->vm_start + len; do_mmap_private()
1195 if (vma->vm_file) { do_mmap_private()
1200 fpos = vma->vm_pgoff; do_mmap_private()
1205 ret = __vfs_read(vma->vm_file, base, len, &fpos); do_mmap_private()
1221 region->vm_start = vma->vm_start = 0; do_mmap_private()
1222 region->vm_end = vma->vm_end = 0; do_mmap_private()
1245 struct vm_area_struct *vma; do_mmap() local
1273 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); do_mmap()
1274 if (!vma) do_mmap()
1281 INIT_LIST_HEAD(&vma->anon_vma_chain); do_mmap()
1282 vma->vm_flags = vm_flags; do_mmap()
1283 vma->vm_pgoff = pgoff; do_mmap()
1287 vma->vm_file = get_file(file); do_mmap()
1339 vma->vm_region = pregion; do_mmap()
1342 vma->vm_start = start; do_mmap()
1343 vma->vm_end = start + len; do_mmap()
1346 vma->vm_flags |= VM_MAPPED_COPY; do_mmap()
1348 ret = do_mmap_shared_file(vma); do_mmap()
1350 vma->vm_region = NULL; do_mmap()
1351 vma->vm_start = 0; do_mmap()
1352 vma->vm_end = 0; do_mmap()
1386 vma->vm_start = region->vm_start = addr; do_mmap()
1387 vma->vm_end = region->vm_end = addr + len; do_mmap()
1392 vma->vm_region = region; do_mmap()
1397 if (file && vma->vm_flags & VM_SHARED) do_mmap()
1398 ret = do_mmap_shared_file(vma); do_mmap()
1400 ret = do_mmap_private(vma, region, len, capabilities); do_mmap()
1406 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) do_mmap()
1411 result = vma->vm_start; do_mmap()
1416 add_vma_to_mm(current->mm, vma); do_mmap()
1420 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { do_mmap()
1435 if (vma->vm_file) do_mmap()
1436 fput(vma->vm_file); do_mmap()
1437 kmem_cache_free(vm_area_cachep, vma); do_mmap()
1448 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n", do_mmap()
1509 * split a vma into two pieces at address 'addr', a new vma is allocated either
1512 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, split_vma() argument
1521 if (vma->vm_file) split_vma()
1538 *new = *vma; split_vma()
1539 *region = *vma->vm_region; split_vma()
1542 npages = (addr - vma->vm_start) >> PAGE_SHIFT; split_vma()
1554 delete_vma_from_mm(vma); split_vma()
1556 delete_nommu_region(vma->vm_region); split_vma()
1558 vma->vm_region->vm_start = vma->vm_start = addr; split_vma()
1559 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; split_vma()
1561 vma->vm_region->vm_end = vma->vm_end = addr; split_vma()
1562 vma->vm_region->vm_top = addr; split_vma()
1564 add_nommu_region(vma->vm_region); split_vma()
1567 add_vma_to_mm(mm, vma); split_vma()
1577 struct vm_area_struct *vma, shrink_vma()
1584 delete_vma_from_mm(vma); shrink_vma()
1585 if (from > vma->vm_start) shrink_vma()
1586 vma->vm_end = from; shrink_vma()
1588 vma->vm_start = to; shrink_vma()
1589 add_vma_to_mm(mm, vma); shrink_vma()
1592 region = vma->vm_region; shrink_vma()
1617 struct vm_area_struct *vma; do_munmap() local
1628 vma = find_vma(mm, start); do_munmap()
1629 if (!vma) { do_munmap()
1641 if (vma->vm_file) { do_munmap()
1643 if (start > vma->vm_start) do_munmap()
1645 if (end == vma->vm_end) do_munmap()
1647 vma = vma->vm_next; do_munmap()
1648 } while (vma); do_munmap()
1652 if (start == vma->vm_start && end == vma->vm_end) do_munmap()
1654 if (start < vma->vm_start || end > vma->vm_end) do_munmap()
1658 if (end != vma->vm_end && offset_in_page(end)) do_munmap()
1660 if (start != vma->vm_start && end != vma->vm_end) { do_munmap()
1661 ret = split_vma(mm, vma, start, 1); do_munmap()
1665 return shrink_vma(mm, vma, start, end); do_munmap()
1669 delete_vma_from_mm(vma); do_munmap()
1670 delete_vma(mm, vma); do_munmap()
1697 struct vm_area_struct *vma; exit_mmap() local
1704 while ((vma = mm->mmap)) { exit_mmap()
1705 mm->mmap = vma->vm_next; exit_mmap()
1706 delete_vma_from_mm(vma); exit_mmap()
1707 delete_vma(mm, vma); exit_mmap()
1731 struct vm_area_struct *vma; do_mremap() local
1745 vma = find_vma_exact(current->mm, addr, old_len); do_mremap()
1746 if (!vma) do_mremap()
1749 if (vma->vm_end != vma->vm_start + old_len) do_mremap()
1752 if (vma->vm_flags & VM_MAYSHARE) do_mremap()
1755 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) do_mremap()
1759 vma->vm_end = vma->vm_start + new_len; do_mremap()
1760 return vma->vm_start; do_mremap()
1775 struct page *follow_page_mask(struct vm_area_struct *vma, follow_page_mask() argument
1783 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, remap_pfn_range() argument
1789 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; remap_pfn_range()
1794 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) vm_iomap_memory() argument
1797 unsigned long vm_len = vma->vm_end - vma->vm_start; vm_iomap_memory()
1799 pfn += vma->vm_pgoff; vm_iomap_memory()
1800 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); vm_iomap_memory()
1804 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, remap_vmalloc_range() argument
1807 unsigned int size = vma->vm_end - vma->vm_start; remap_vmalloc_range()
1809 if (!(vma->vm_flags & VM_USERMAP)) remap_vmalloc_range()
1812 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); remap_vmalloc_range()
1813 vma->vm_end = vma->vm_start + size; remap_vmalloc_range()
1926 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) filemap_fault() argument
1933 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) filemap_map_pages() argument
1942 struct vm_area_struct *vma; __access_remote_vm() local
1947 vma = find_vma(mm, addr); __access_remote_vm()
1948 if (vma) { __access_remote_vm()
1950 if (addr + len >= vma->vm_end) __access_remote_vm()
1951 len = vma->vm_end - addr; __access_remote_vm()
1954 if (write && vma->vm_flags & VM_MAYWRITE) __access_remote_vm()
1955 copy_to_user_page(vma, NULL, addr, __access_remote_vm()
1957 else if (!write && vma->vm_flags & VM_MAYREAD) __access_remote_vm()
1958 copy_from_user_page(vma, NULL, addr, __access_remote_vm()
2022 struct vm_area_struct *vma; nommu_shrink_inode_mappings() local
2034 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { nommu_shrink_inode_mappings()
2037 if (vma->vm_flags & VM_SHARED) { nommu_shrink_inode_mappings()
2050 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { nommu_shrink_inode_mappings()
2051 if (!(vma->vm_flags & VM_SHARED)) nommu_shrink_inode_mappings()
2054 region = vma->vm_region; nommu_shrink_inode_mappings()
1576 shrink_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long from, unsigned long to) shrink_vma() argument
H A Dvmacache.c
9 * Flush vma caches for threads that share a given mm.
12 * exclusively and other threads accessing the vma cache will
14 * is required to maintain the vma cache.
96 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find() local
98 if (!vma) vmacache_find()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) vmacache_find()
102 if (vma->vm_start <= addr && vma->vm_end > addr) { vmacache_find()
104 return vma; vmacache_find()
124 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find_exact() local
126 if (vma && vma->vm_start == start && vma->vm_end == end) { vmacache_find_exact()
128 return vma; vmacache_find_exact()
H A Dmemory.c
529 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, free_pgtables() argument
532 while (vma) { free_pgtables()
533 struct vm_area_struct *next = vma->vm_next; free_pgtables()
534 unsigned long addr = vma->vm_start; free_pgtables()
537 * Hide vma from rmap and truncate_pagecache before freeing free_pgtables()
540 unlink_anon_vmas(vma); free_pgtables()
541 unlink_file_vma(vma); free_pgtables()
543 if (is_vm_hugetlb_page(vma)) { free_pgtables()
544 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, free_pgtables()
550 while (next && next->vm_start <= vma->vm_end + PMD_SIZE free_pgtables()
552 vma = next; free_pgtables()
553 next = vma->vm_next; free_pgtables()
554 unlink_anon_vmas(vma); free_pgtables()
555 unlink_file_vma(vma); free_pgtables()
557 free_pgd_range(tlb, addr, vma->vm_end, free_pgtables()
560 vma = next; free_pgtables()
564 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, __pte_alloc() argument
600 wait_split_huge_page(vma->anon_vma, pmd); __pte_alloc()
647 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, print_bad_pte() argument
650 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); print_bad_pte()
679 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; print_bad_pte()
680 index = linear_page_index(vma, addr); print_bad_pte()
690 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); print_bad_pte()
695 vma->vm_file, print_bad_pte()
696 vma->vm_ops ? vma->vm_ops->fault : NULL, print_bad_pte()
697 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, print_bad_pte()
720 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
724 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
730 * as the vma is not a COW mapping; in that case, we know that all ptes are
750 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, vm_normal_page() argument
758 if (vma->vm_ops && vma->vm_ops->find_special_page) vm_normal_page()
759 return vma->vm_ops->find_special_page(vma, addr); vm_normal_page()
760 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) vm_normal_page()
763 print_bad_pte(vma, addr, pte, NULL); vm_normal_page()
769 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { vm_normal_page()
770 if (vma->vm_flags & VM_MIXEDMAP) { vm_normal_page()
776 off = (addr - vma->vm_start) >> PAGE_SHIFT; vm_normal_page()
777 if (pfn == vma->vm_pgoff + off) vm_normal_page()
779 if (!is_cow_mapping(vma->vm_flags)) vm_normal_page()
788 print_bad_pte(vma, addr, pte, NULL); vm_normal_page()
801 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, vm_normal_page_pmd() argument
811 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { vm_normal_page_pmd()
812 if (vma->vm_flags & VM_MIXEDMAP) { vm_normal_page_pmd()
818 off = (addr - vma->vm_start) >> PAGE_SHIFT; vm_normal_page_pmd()
819 if (pfn == vma->vm_pgoff + off) vm_normal_page_pmd()
821 if (!is_cow_mapping(vma->vm_flags)) vm_normal_page_pmd()
843 * covered by this vma.
848 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, copy_one_pte()
851 unsigned long vm_flags = vma->vm_flags; copy_one_pte()
913 page = vm_normal_page(vma, addr, pte); copy_one_pte()
929 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, copy_pte_range()
968 vma, addr, rss); copy_pte_range()
992 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, copy_pmd_range()
1008 dst_pmd, src_pmd, addr, vma); copy_pmd_range()
1018 vma, addr, next)) copy_pmd_range()
1025 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, copy_pud_range()
1040 vma, addr, next)) copy_pud_range()
1047 struct vm_area_struct *vma) copy_page_range()
1051 unsigned long addr = vma->vm_start; copy_page_range()
1052 unsigned long end = vma->vm_end; copy_page_range()
1064 if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && copy_page_range()
1065 !vma->anon_vma) copy_page_range()
1068 if (is_vm_hugetlb_page(vma)) copy_page_range()
1069 return copy_hugetlb_page_range(dst_mm, src_mm, vma); copy_page_range()
1071 if (unlikely(vma->vm_flags & VM_PFNMAP)) { copy_page_range()
1076 ret = track_pfn_copy(vma); copy_page_range()
1087 is_cow = is_cow_mapping(vma->vm_flags); copy_page_range()
1102 vma, addr, next))) { copy_page_range()
1114 struct vm_area_struct *vma, pmd_t *pmd, zap_pte_range()
1140 page = vm_normal_page(vma, addr, ptent); zap_pte_range()
1164 likely(!(vma->vm_flags & VM_SEQ_READ))) zap_pte_range()
1170 print_bad_pte(vma, addr, ptent, page); zap_pte_range()
1196 print_bad_pte(vma, addr, ptent, NULL); zap_pte_range()
1226 struct vm_area_struct *vma, pud_t *pud, zap_pmd_range()
1240 pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", zap_pmd_range()
1242 vma->vm_start, zap_pmd_range()
1243 vma->vm_end); zap_pmd_range()
1247 split_huge_page_pmd(vma, addr, pmd); zap_pmd_range()
1248 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) zap_pmd_range()
1261 next = zap_pte_range(tlb, vma, pmd, addr, next, details); zap_pmd_range()
1270 struct vm_area_struct *vma, pgd_t *pgd, zap_pud_range()
1282 next = zap_pmd_range(tlb, vma, pud, addr, next, details); zap_pud_range()
1289 struct vm_area_struct *vma, unmap_page_range()
1300 tlb_start_vma(tlb, vma); unmap_page_range()
1301 pgd = pgd_offset(vma->vm_mm, addr); unmap_page_range()
1306 next = zap_pud_range(tlb, vma, pgd, addr, next, details); unmap_page_range()
1308 tlb_end_vma(tlb, vma); unmap_page_range()
1313 struct vm_area_struct *vma, unsigned long start_addr, unmap_single_vma()
1317 unsigned long start = max(vma->vm_start, start_addr); unmap_single_vma()
1320 if (start >= vma->vm_end) unmap_single_vma()
1322 end = min(vma->vm_end, end_addr); unmap_single_vma()
1323 if (end <= vma->vm_start) unmap_single_vma()
1326 if (vma->vm_file) unmap_single_vma()
1327 uprobe_munmap(vma, start, end); unmap_single_vma()
1329 if (unlikely(vma->vm_flags & VM_PFNMAP)) unmap_single_vma()
1330 untrack_pfn(vma, 0, 0); unmap_single_vma()
1333 if (unlikely(is_vm_hugetlb_page(vma))) { unmap_single_vma()
1335 * It is undesirable to test vma->vm_file as it unmap_single_vma()
1340 * mmap_region() nullifies vma->vm_file unmap_single_vma()
1345 if (vma->vm_file) { unmap_single_vma()
1346 i_mmap_lock_write(vma->vm_file->f_mapping); unmap_single_vma()
1347 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); unmap_single_vma()
1348 i_mmap_unlock_write(vma->vm_file->f_mapping); unmap_single_vma()
1351 unmap_page_range(tlb, vma, start, end, details); unmap_single_vma()
1356 * unmap_vmas - unmap a range of memory covered by a list of vma's
1358 * @vma: the starting vma
1362 * Unmap all pages in the vma list.
1374 struct vm_area_struct *vma, unsigned long start_addr, unmap_vmas()
1377 struct mm_struct *mm = vma->vm_mm; unmap_vmas()
1380 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) unmap_vmas()
1381 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); unmap_vmas()
1387 * @vma: vm_area_struct holding the applicable pages
1394 void zap_page_range(struct vm_area_struct *vma, unsigned long start, zap_page_range() argument
1397 struct mm_struct *mm = vma->vm_mm; zap_page_range()
1405 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) zap_page_range()
1406 unmap_single_vma(&tlb, vma, start, end, details); zap_page_range()
1413 * @vma: vm_area_struct holding the applicable pages
1420 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, zap_page_range_single() argument
1423 struct mm_struct *mm = vma->vm_mm; zap_page_range_single()
1431 unmap_single_vma(&tlb, vma, address, end, details); zap_page_range_single()
1437 * zap_vma_ptes - remove ptes mapping the vma
1438 * @vma: vm_area_struct holding ptes to be zapped
1444 * The entire address range must be fully contained within the vma.
1448 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, zap_vma_ptes() argument
1451 if (address < vma->vm_start || address + size > vma->vm_end || zap_vma_ptes()
1452 !(vma->vm_flags & VM_PFNMAP)) zap_vma_ptes()
1454 zap_page_range_single(vma, address, size, NULL); zap_vma_ptes()
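zap_vma_ptes() lets a driver that populated a VM_PFNMAP vma tear its PTEs back down (for example on revoke) without unmapping the vma itself. A short sketch with a hypothetical revoke helper:

#include <linux/mm.h>

/*
 * Hypothetical revoke path: drop every pte in @vma so the next access faults
 * back into the driver.  Per zap_vma_ptes(), @vma must be VM_PFNMAP and the
 * range must lie entirely inside it.
 */
static int my_revoke_mapping(struct vm_area_struct *vma)
{
        return zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}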
1481 static int insert_page(struct vm_area_struct *vma, unsigned long addr, insert_page() argument
1484 struct mm_struct *mm = vma->vm_mm; insert_page()
1517 * vm_insert_page - insert single page into user vma
1518 * @vma: user vma to map to
1523 * into a user vma.
1532 * that. Your vma protection will have to be set up correctly, which
1539 * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
1540 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1543 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, vm_insert_page() argument
1546 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_page()
1550 if (!(vma->vm_flags & VM_MIXEDMAP)) { vm_insert_page()
1551 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); vm_insert_page()
1552 BUG_ON(vma->vm_flags & VM_PFNMAP); vm_insert_page()
1553 vma->vm_flags |= VM_MIXEDMAP; vm_insert_page()
1555 return insert_page(vma, addr, page, vma->vm_page_prot); vm_insert_page()
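vm_insert_page() is the route for drivers that share ordinary refcounted pages with userspace; called from an ->mmap handler (under mmap_sem write-lock) it may set VM_MIXEDMAP itself. A sketch assuming a hypothetical preallocated buffer my_buf_pages[]:

#include <linux/mm.h>
#include <linux/fs.h>

#define MY_NPAGES 16
static struct page *my_buf_pages[MY_NPAGES];    /* assumed: allocated at probe time */

/* Hypothetical file_operations.mmap: expose the driver buffer page by page. */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        int i, ret;

        if (vma->vm_end - vma->vm_start > MY_NPAGES * PAGE_SIZE)
                return -EINVAL;

        for (i = 0; uaddr < vma->vm_end; i++, uaddr += PAGE_SIZE) {
                /* Under mmap_sem write-lock, so vm_insert_page() may set VM_MIXEDMAP. */
                ret = vm_insert_page(vma, uaddr, my_buf_pages[i]);
                if (ret)
                        return ret;
        }
        return 0;
}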
1559 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, insert_pfn() argument
1562 struct mm_struct *mm = vma->vm_mm; insert_pfn()
1578 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ insert_pfn()
1588 * vm_insert_pfn - insert single pfn into user vma
1589 * @vma: user vma to map to
1594 * they've allocated into a user vma. Same comments apply.
1599 * vma cannot be a COW mapping.
1604 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, vm_insert_pfn() argument
1608 pgprot_t pgprot = vma->vm_page_prot; vm_insert_pfn()
1615 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); vm_insert_pfn()
1616 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == vm_insert_pfn()
1618 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); vm_insert_pfn()
1619 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); vm_insert_pfn()
1621 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_pfn()
1623 if (track_pfn_insert(vma, &pgprot, pfn)) vm_insert_pfn()
1626 ret = insert_pfn(vma, addr, pfn, pgprot); vm_insert_pfn()
1632 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, vm_insert_mixed() argument
1635 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); vm_insert_mixed()
1637 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_mixed()
1651 return insert_page(vma, addr, page, vma->vm_page_prot); vm_insert_mixed()
1653 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); vm_insert_mixed()
1726 * @vma: user vma to map to
1734 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, remap_pfn_range() argument
1740 struct mm_struct *mm = vma->vm_mm; remap_pfn_range()
1752 * Disable vma merging and expanding with mremap(). remap_pfn_range()
1754 * Omit vma from core dump, even when VM_IO turned off. remap_pfn_range()
1758 * un-COW'ed pages by matching them up with "vma->vm_pgoff". remap_pfn_range()
1761 if (is_cow_mapping(vma->vm_flags)) { remap_pfn_range()
1762 if (addr != vma->vm_start || end != vma->vm_end) remap_pfn_range()
1764 vma->vm_pgoff = pfn; remap_pfn_range()
1767 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); remap_pfn_range()
1771 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; remap_pfn_range()
1776 flush_cache_range(vma, addr, end); remap_pfn_range()
1786 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); remap_pfn_range()
1794 * @vma: user vma to map to
1800 * we'll figure out the rest from the vma information.
1802 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
1805 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) vm_iomap_memory() argument
1824 if (vma->vm_pgoff > pages) vm_iomap_memory()
1826 pfn += vma->vm_pgoff; vm_iomap_memory()
1827 pages -= vma->vm_pgoff; vm_iomap_memory()
1830 vm_len = vma->vm_end - vma->vm_start; vm_iomap_memory()
1835 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); vm_iomap_memory()
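remap_pfn_range() and vm_iomap_memory() are the complementary path for raw PFNs with no struct page (device BARs, reserved memory); the canonical ->mmap handler reduces to one call. A sketch with my_phys_base/my_region_len standing in for device facts discovered at probe time:

#include <linux/mm.h>
#include <linux/fs.h>

static phys_addr_t my_phys_base;        /* assumed: register window base found at probe */
static unsigned long my_region_len;     /* assumed: its page-aligned length */

/* Hypothetical file_operations.mmap for an uncached register window. */
static int my_io_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /*
         * vm_iomap_memory() range-checks vm_pgoff and the requested length
         * against the region, then calls io_remap_pfn_range() for us.
         */
        return vm_iomap_memory(vma, my_phys_base, my_region_len);
}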
1964 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) cow_user_page() argument
1989 copy_user_highpage(dst, src, va, vma); cow_user_page()
1998 static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, do_page_mkwrite() argument
2010 ret = vma->vm_ops->page_mkwrite(vma, &vmf); do_page_mkwrite()
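do_page_mkwrite() is the kernel-side trampoline into a mapping's ->page_mkwrite hook; a filesystem or driver that wants write-notification (see vma_wants_writenotify() above) supplies something like the following. A hedged sketch, with the real bookkeeping elided:

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Hypothetical .page_mkwrite: a shared page is about to become writable.
 * Return it locked so do_page_mkwrite() sees VM_FAULT_LOCKED.
 */
static int my_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);

        lock_page(page);
        /* The page may have been truncated while we waited for the lock. */
        if (page->mapping != inode->i_mapping) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;
        }

        /* ... reserve blocks / dirty private state here ... */

        return VM_FAULT_LOCKED;
}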
2026 * Handle write page faults for pages that can be reused in the current vma
2034 struct vm_area_struct *vma, unsigned long address,
2049 flush_cache_page(vma, address, pte_pfn(orig_pte)); __releases()
2051 entry = maybe_mkwrite(pte_mkdirty(entry), vma); __releases()
2052 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) __releases()
2053 update_mmu_cache(vma, address, page_table); __releases()
2078 file_update_time(vma->vm_file); __releases()
2100 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, wp_page_copy() argument
2112 if (unlikely(anon_vma_prepare(vma))) wp_page_copy()
2116 new_page = alloc_zeroed_user_highpage_movable(vma, address); wp_page_copy()
2120 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); wp_page_copy()
2123 cow_user_page(new_page, old_page, address, vma); wp_page_copy()
2146 flush_cache_page(vma, address, pte_pfn(orig_pte)); wp_page_copy()
2147 entry = mk_pte(new_page, vma->vm_page_prot); wp_page_copy()
2148 entry = maybe_mkwrite(pte_mkdirty(entry), vma); wp_page_copy()
2155 ptep_clear_flush_notify(vma, address, page_table); wp_page_copy()
2156 page_add_new_anon_rmap(new_page, vma, address); wp_page_copy()
2158 lru_cache_add_active_or_unevictable(new_page, vma); wp_page_copy()
2165 update_mmu_cache(vma, address, page_table); wp_page_copy()
2206 * Don't let another task, with possibly unlocked vma, wp_page_copy()
2209 if (page_copied && (vma->vm_flags & VM_LOCKED)) { wp_page_copy()
2230 struct vm_area_struct *vma, unsigned long address, wp_pfn_shared()
2234 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { wp_pfn_shared()
2237 .pgoff = linear_page_index(vma, address), wp_pfn_shared()
2244 ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); wp_pfn_shared()
2257 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, wp_pfn_shared()
2261 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2276 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { __releases()
2280 tmp = do_page_mkwrite(vma, old_page, address); __releases()
2303 return wp_page_reuse(mm, vma, address, page_table, ptl, __releases()
2321 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2325 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2332 old_page = vm_normal_page(vma, address, orig_pte); __releases()
2341 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == __releases()
2343 return wp_pfn_shared(mm, vma, address, page_table, ptl, __releases()
2347 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2376 page_move_anon_rmap(old_page, vma, address); __releases()
2378 return wp_page_reuse(mm, vma, address, page_table, ptl, __releases()
2382 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == __releases()
2384 return wp_page_shared(mm, vma, address, page_table, pmd, __releases()
2394 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2398 static void unmap_mapping_range_vma(struct vm_area_struct *vma, unmap_mapping_range_vma() argument
2402 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); unmap_mapping_range_vma()
2408 struct vm_area_struct *vma; unmap_mapping_range_tree() local
2411 vma_interval_tree_foreach(vma, root, unmap_mapping_range_tree()
2414 vba = vma->vm_pgoff; unmap_mapping_range_tree()
2415 vea = vba + vma_pages(vma) - 1; unmap_mapping_range_tree()
2424 unmap_mapping_range_vma(vma, unmap_mapping_range_tree()
2425 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, unmap_mapping_range_tree()
2426 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, unmap_mapping_range_tree()
2479 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2486 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, do_swap_page() argument
2509 print_bad_pte(vma, address, orig_pte, NULL); do_swap_page()
2518 GFP_HIGHUSER_MOVABLE, vma, address); do_swap_page()
2564 page = ksm_might_need_to_copy(page, vma, address); do_swap_page()
2600 pte = mk_pte(page, vma->vm_page_prot); do_swap_page()
2602 pte = maybe_mkwrite(pte_mkdirty(pte), vma); do_swap_page()
2607 flush_icache_page(vma, page); do_swap_page()
2612 do_page_add_anon_rmap(page, vma, address, exclusive); do_swap_page()
2615 page_add_new_anon_rmap(page, vma, address); do_swap_page()
2617 lru_cache_add_active_or_unevictable(page, vma); do_swap_page()
2621 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) do_swap_page()
2638 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); do_swap_page()
2645 update_mmu_cache(vma, address, page_table); do_swap_page()
2667 * doesn't hit another vma.
2669 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) check_stack_guard_page() argument
2672 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { check_stack_guard_page()
2673 struct vm_area_struct *prev = vma->vm_prev; check_stack_guard_page()
2684 return expand_downwards(vma, address - PAGE_SIZE); check_stack_guard_page()
2686 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { check_stack_guard_page()
2687 struct vm_area_struct *next = vma->vm_next; check_stack_guard_page()
2693 return expand_upwards(vma, address + PAGE_SIZE); check_stack_guard_page()
2699 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2703 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, do_anonymous_page() argument
2715 if (vma->vm_flags & VM_SHARED) do_anonymous_page()
2719 if (check_stack_guard_page(vma, address) < 0) do_anonymous_page()
2725 vma->vm_page_prot)); do_anonymous_page()
2730 if (userfaultfd_missing(vma)) { do_anonymous_page()
2732 return handle_userfault(vma, address, flags, do_anonymous_page()
2739 if (unlikely(anon_vma_prepare(vma))) do_anonymous_page()
2741 page = alloc_zeroed_user_highpage_movable(vma, address); do_anonymous_page()
2755 entry = mk_pte(page, vma->vm_page_prot); do_anonymous_page()
2756 if (vma->vm_flags & VM_WRITE) do_anonymous_page()
2764 if (userfaultfd_missing(vma)) { do_anonymous_page()
2768 return handle_userfault(vma, address, flags, do_anonymous_page()
2773 page_add_new_anon_rmap(page, vma, address); do_anonymous_page()
2775 lru_cache_add_active_or_unevictable(page, vma); do_anonymous_page()
2780 update_mmu_cache(vma, address, page_table); do_anonymous_page()
2796 * released depending on flags and vma->vm_ops->fault() return value.
2799 static int __do_fault(struct vm_area_struct *vma, unsigned long address, __do_fault() argument
2812 ret = vma->vm_ops->fault(vma, &vmf); __do_fault()
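__do_fault() is what ultimately invokes a mapping's ->fault handler; the driver side of that contract, for the common case of handing back one of the driver's own pages, looks roughly like this (my_pages[] is an assumed preallocated array):

#include <linux/mm.h>

#define MY_NPAGES 16
static struct page *my_pages[MY_NPAGES];        /* assumed: allocated up front */

/* Hypothetical vm_operations_struct.fault handler (4.4 signature). */
static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        if (vmf->pgoff >= MY_NPAGES)
                return VM_FAULT_SIGBUS;         /* access beyond the backing object */

        /* Grab a reference that the new mapping will own; core mm inserts the pte. */
        get_page(my_pages[vmf->pgoff]);
        vmf->page = my_pages[vmf->pgoff];
        return 0;
}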
2838 * @vma: virtual memory area
2850 void do_set_pte(struct vm_area_struct *vma, unsigned long address, do_set_pte() argument
2855 flush_icache_page(vma, page); do_set_pte()
2856 entry = mk_pte(page, vma->vm_page_prot); do_set_pte()
2858 entry = maybe_mkwrite(pte_mkdirty(entry), vma); do_set_pte()
2860 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); do_set_pte()
2861 page_add_new_anon_rmap(page, vma, address); do_set_pte()
2863 inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); do_set_pte()
2866 set_pte_at(vma->vm_mm, address, pte, entry); do_set_pte()
2869 update_mmu_cache(vma, address, pte); do_set_pte()
2936 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, do_fault_around() argument
2947 start_addr = max(address & mask, vma->vm_start); do_fault_around()
2953 * max_pgoff is either end of page table or end of vma do_fault_around()
2958 max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, do_fault_around()
2966 if (start_addr >= vma->vm_end) do_fault_around()
2976 vma->vm_ops->map_pages(vma, &vmf); do_fault_around()
2979 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_read_fault() argument
2993 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { do_read_fault()
2995 do_fault_around(vma, address, pte, pgoff, flags); do_read_fault()
3001 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); do_read_fault()
3012 do_set_pte(vma, address, fault_page, pte, false, false); do_read_fault()
3019 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_cow_fault() argument
3029 if (unlikely(anon_vma_prepare(vma))) do_cow_fault()
3032 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); do_cow_fault()
3041 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); do_cow_fault()
3046 copy_user_highpage(new_page, fault_page, address, vma); do_cow_fault()
3060 i_mmap_unlock_read(vma->vm_file->f_mapping); do_cow_fault()
3064 do_set_pte(vma, address, new_page, pte, true, true); do_cow_fault()
3066 lru_cache_add_active_or_unevictable(new_page, vma); do_cow_fault()
3076 i_mmap_unlock_read(vma->vm_file->f_mapping); do_cow_fault()
3085 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_shared_fault() argument
3096 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); do_shared_fault()
3104 if (vma->vm_ops->page_mkwrite) { do_shared_fault()
3106 tmp = do_page_mkwrite(vma, fault_page, address); do_shared_fault()
3121 do_set_pte(vma, address, fault_page, pte, true, false); do_shared_fault()
3129 * pinned by vma->vm_file's reference. We rely on unlock_page()'s do_shared_fault()
3134 if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { do_shared_fault()
3142 if (!vma->vm_ops->page_mkwrite) do_shared_fault()
3143 file_update_time(vma->vm_file); do_shared_fault()
3149 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3154 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_fault() argument
3159 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; do_fault()
3163 if (!vma->vm_ops->fault) do_fault()
3166 return do_read_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3168 if (!(vma->vm_flags & VM_SHARED)) do_fault()
3169 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3171 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); do_fault()
3174 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, numa_migrate_prep() argument
3186 return mpol_misplaced(page, vma, addr); numa_migrate_prep()
3189 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_numa_page() argument
3202 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); do_numa_page()
3221 pte = pte_modify(pte, vma->vm_page_prot); do_numa_page()
3226 update_mmu_cache(vma, addr, ptep); do_numa_page()
3228 page = vm_normal_page(vma, addr, pte); do_numa_page()
3242 if (!(vma->vm_flags & VM_WRITE)) do_numa_page()
3249 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) do_numa_page()
3254 target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); do_numa_page()
3262 migrated = migrate_misplaced_page(page, vma, target_nid); do_numa_page()
3275 static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, create_huge_pmd() argument
3278 if (vma_is_anonymous(vma)) create_huge_pmd()
3279 return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); create_huge_pmd()
3280 if (vma->vm_ops->pmd_fault) create_huge_pmd()
3281 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); create_huge_pmd()
3285 static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, wp_huge_pmd() argument
3289 if (vma_is_anonymous(vma)) wp_huge_pmd()
3290 return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); wp_huge_pmd()
3291 if (vma->vm_ops->pmd_fault) wp_huge_pmd()
3292 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); wp_huge_pmd()
3305 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3313 struct vm_area_struct *vma, unsigned long address, handle_pte_fault()
3331 if (vma_is_anonymous(vma)) handle_pte_fault()
3332 return do_anonymous_page(mm, vma, address, handle_pte_fault()
3335 return do_fault(mm, vma, address, pte, pmd, handle_pte_fault()
3338 return do_swap_page(mm, vma, address, handle_pte_fault()
3343 return do_numa_page(mm, vma, address, entry, pte, pmd); handle_pte_fault()
3351 return do_wp_page(mm, vma, address, handle_pte_fault()
3356 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { handle_pte_fault()
3357 update_mmu_cache(vma, address, pte); handle_pte_fault()
3366 flush_tlb_fix_spurious_fault(vma, address); handle_pte_fault()
3379 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, __handle_mm_fault() argument
3387 if (unlikely(is_vm_hugetlb_page(vma))) __handle_mm_fault()
3388 return hugetlb_fault(mm, vma, address, flags); __handle_mm_fault()
3397 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { __handle_mm_fault()
3398 int ret = create_huge_pmd(mm, vma, address, pmd, flags); __handle_mm_fault()
3418 return do_huge_pmd_numa_page(mm, vma, address, __handle_mm_fault()
3422 ret = wp_huge_pmd(mm, vma, address, pmd, __handle_mm_fault()
3427 huge_pmd_set_accessed(mm, vma, address, pmd, __handle_mm_fault()
3440 unlikely(__pte_alloc(mm, vma, pmd, address))) __handle_mm_fault()
3463 return handle_pte_fault(mm, vma, address, pte, pmd, flags); __handle_mm_fault()
3472 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, handle_mm_fault() argument
3492 ret = __handle_mm_fault(mm, vma, address, flags); handle_mm_fault()
3616 * @vma: memory mapping
3624 int follow_pfn(struct vm_area_struct *vma, unsigned long address, follow_pfn() argument
3631 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_pfn()
3634 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); follow_pfn()
3644 int follow_phys(struct vm_area_struct *vma, follow_phys() argument
3652 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_phys()
3655 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) follow_phys()
3672 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, generic_access_phys() argument
3680 if (follow_phys(vma, addr, write, &prot, &phys_addr)) generic_access_phys()
3702 struct vm_area_struct *vma; __access_remote_vm() local
3713 write, 1, &page, &vma); __access_remote_vm()
3722 vma = find_vma(mm, addr); __access_remote_vm()
3723 if (!vma || vma->vm_start > addr) __access_remote_vm()
3725 if (vma->vm_ops && vma->vm_ops->access) __access_remote_vm()
3726 ret = vma->vm_ops->access(vma, addr, buf, __access_remote_vm()
3740 copy_to_user_page(vma, page, addr, __access_remote_vm()
3744 copy_from_user_page(vma, page, addr, __access_remote_vm()
3802 struct vm_area_struct *vma; print_vma_addr() local
3812 vma = find_vma(mm, ip); print_vma_addr()
3813 if (vma && vma->vm_file) { print_vma_addr()
3814 struct file *f = vma->vm_file; print_vma_addr()
3823 vma->vm_start, print_vma_addr()
3824 vma->vm_end - vma->vm_start); print_vma_addr()
3887 struct vm_area_struct *vma, copy_user_gigantic_page()
3896 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); copy_user_gigantic_page()
3905 unsigned long addr, struct vm_area_struct *vma, copy_user_huge_page()
3911 copy_user_gigantic_page(dst, src, addr, vma, copy_user_huge_page()
3919 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); copy_user_huge_page()
847 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) copy_one_pte() argument
928 copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pte_range() argument
991 copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pmd_range() argument
1024 copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pud_range() argument
1046 copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) copy_page_range() argument
1113 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) zap_pte_range() argument
1225 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) zap_pmd_range() argument
1269 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) zap_pud_range() argument
1288 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) unmap_page_range() argument
1312 unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) unmap_single_vma() argument
1373 unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) unmap_vmas() argument
2229 wp_pfn_shared(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, pmd_t *pmd) wp_pfn_shared() argument
3312 handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) handle_pte_fault() argument
3885 copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) copy_user_gigantic_page() argument
3904 copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) copy_user_huge_page() argument
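The memory.c hits above trace the page-fault path from handle_mm_fault() through handle_pte_fault() down to do_anonymous_page() and the do_*_fault() helpers. As an illustrative userspace sketch only (not code from the kernel tree), the program below maps an anonymous region and writes to it, which on first touch drives exactly that anonymous-fault path:

/* demand_fault.c - touch an anonymous mapping so the kernel takes the
 * do_anonymous_page() path on the first write fault (illustrative only). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 * sysconf(_SC_PAGESIZE);
	/* MAP_ANONYMOUS|MAP_PRIVATE gives a vma for which vma_is_anonymous() is true */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Each first write to a page is a minor fault resolved by
	 * handle_mm_fault() -> handle_pte_fault() -> do_anonymous_page(). */
	memset(p, 0xab, len);
	printf("faulted in %zu bytes at %p\n", len, (void *)p);
	munmap(p, len);
	return 0;
}

Running it under /usr/bin/time -v should show the writes as minor page faults on most distributions.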
H A Dmlock.c48 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
50 * the mmap_sem for read, and verify that the vma really is locked
145 * the page back to the unevictable list if some other vma has it mlocked.
156 * munlock_vma_page - munlock a vma page
163 * When we munlock a page, because the vma where we found the page is being
360 struct vm_area_struct *vma, int zoneid, unsigned long start, __munlock_pagevec_fill()
371 pte = get_locked_pte(vma->vm_mm, start, &ptl); __munlock_pagevec_fill()
383 page = vm_normal_page(vma, start, *pte); __munlock_pagevec_fill()
405 * munlock_vma_pages_range() - munlock all pages in the vma range.'
406 * @vma - vma containing range to be munlock()ed.
407 * @start - start address in @vma of the range
408 * @end - end of range in @vma.
412 * Called with @vma VM_LOCKED.
422 void munlock_vma_pages_range(struct vm_area_struct *vma, munlock_vma_pages_range() argument
425 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; munlock_vma_pages_range()
443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, munlock_vma_pages_range()
474 start = __munlock_pagevec_fill(&pvec, vma, munlock_vma_pages_range()
498 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, mlock_fixup() argument
501 struct mm_struct *mm = vma->vm_mm; mlock_fixup()
507 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || mlock_fixup()
508 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) mlock_fixup()
512 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); mlock_fixup()
513 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, mlock_fixup()
514 vma->vm_file, pgoff, vma_policy(vma), mlock_fixup()
515 vma->vm_userfaultfd_ctx); mlock_fixup()
517 vma = *prev; mlock_fixup()
521 if (start != vma->vm_start) { mlock_fixup()
522 ret = split_vma(mm, vma, start, 1); mlock_fixup()
527 if (end != vma->vm_end) { mlock_fixup()
528 ret = split_vma(mm, vma, end, 0); mlock_fixup()
549 vma->vm_flags = newflags; mlock_fixup()
551 munlock_vma_pages_range(vma, start, end); mlock_fixup()
554 *prev = vma; mlock_fixup()
562 struct vm_area_struct * vma, * prev; apply_vma_lock_flags() local
572 vma = find_vma(current->mm, start); apply_vma_lock_flags()
573 if (!vma || vma->vm_start > start) apply_vma_lock_flags()
576 prev = vma->vm_prev; apply_vma_lock_flags()
577 if (start > vma->vm_start) apply_vma_lock_flags()
578 prev = vma; apply_vma_lock_flags()
581 vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; apply_vma_lock_flags()
585 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ apply_vma_lock_flags()
586 tmp = vma->vm_end; apply_vma_lock_flags()
589 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); apply_vma_lock_flags()
598 vma = prev->vm_next; apply_vma_lock_flags()
599 if (!vma || vma->vm_start != nstart) { apply_vma_lock_flags()
687 struct vm_area_struct * vma, * prev = NULL; apply_mlockall_flags() local
707 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { apply_mlockall_flags()
710 newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; apply_mlockall_flags()
714 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); apply_mlockall_flags()
359 __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) __munlock_pagevec_fill() argument
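The mlock.c hits cover mlock_fixup(), which merges or splits vmas while setting or clearing VM_LOCKED, and munlock_vma_pages_range(). A minimal userspace sketch (not kernel code; it assumes RLIMIT_MEMLOCK allows a few pages) that exercises those paths via mlock(2)/munlock(2):

/* mlock_demo.c - drive mlock_fixup()/munlock_vma_pages_range() from
 * userspace via mlock(2) and munlock(2) (illustrative sketch only). */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	/* Locking only the middle pages forces mlock_fixup() to split the
	 * vma so that just the locked range carries VM_LOCKED. */
	if (mlock(p + 2 * page, 4 * page) != 0) { perror("mlock"); return 1; }

	/* munlock() clears VM_LOCKED and walks the affected pages through
	 * munlock_vma_pages_range(). */
	if (munlock(p + 2 * page, 4 * page) != 0) { perror("munlock"); return 1; }

	puts("locked and unlocked the middle of the mapping");
	munmap(p, len);
	return 0;
}

Locking only part of the mapping is deliberate: it triggers the split_vma() calls visible in mlock_fixup() above.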
H A Ddebug.c154 void dump_vma(const struct vm_area_struct *vma) dump_vma() argument
156 pr_emerg("vma %p start %p end %p\n" dump_vma()
160 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, dump_vma()
161 vma->vm_prev, vma->vm_mm, dump_vma()
162 (unsigned long)pgprot_val(vma->vm_page_prot), dump_vma()
163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, dump_vma()
164 vma->vm_file, vma->vm_private_data); dump_vma()
165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); dump_vma()
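dump_vma() above prints a vma's start/end, links, protection bits and backing file from inside the kernel. The closest userspace view of the same fields is /proc/<pid>/maps; the short sketch below (illustrative only, unrelated to mm/debug.c itself) prints the current process's vma list:

/* show_vmas.c - print this process's vma ranges and flags by reading
 * /proc/self/maps, the userspace-visible counterpart of dump_vma(). */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");
	if (!f) {
		perror("fopen /proc/self/maps");
		return 1;
	}
	/* Each line is "start-end perms offset dev inode path", i.e. the
	 * vm_start/vm_end/vm_flags/vm_pgoff/vm_file fields of one vma. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}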
H A Drmap.c80 anon_vma->degree = 1; /* Reference for first vma */ anon_vma_alloc()
132 static void anon_vma_chain_link(struct vm_area_struct *vma, anon_vma_chain_link() argument
136 avc->vma = vma; anon_vma_chain_link()
138 list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_chain_link()
144 * @vma: the memory region in question
146 * This makes sure the memory mapping described by 'vma' has
153 * reason for splitting a vma has been mprotect()), or we
156 * Anon-vma allocations are very subtle, because we may have
159 * allocated vma (it depends on RCU to make sure that the
169 int anon_vma_prepare(struct vm_area_struct *vma) anon_vma_prepare() argument
171 struct anon_vma *anon_vma = vma->anon_vma; anon_vma_prepare()
176 struct mm_struct *mm = vma->vm_mm; anon_vma_prepare()
183 anon_vma = find_mergeable_anon_vma(vma); anon_vma_prepare()
195 if (likely(!vma->anon_vma)) { anon_vma_prepare()
196 vma->anon_vma = anon_vma; anon_vma_prepare()
197 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_prepare()
198 /* vma reference or self-parent link for new root */ anon_vma_prepare()
221 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
222 * have the same vma.
253 * child isn't reused even if there was no alive vma, thus rmap walker has a
279 * that means it has no vma and only one anon_vma child. anon_vma_clone()
307 * Attach vma to its own anon_vma, as well as to the anon_vmas that
311 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) anon_vma_fork() argument
322 vma->anon_vma = NULL; anon_vma_fork()
328 error = anon_vma_clone(vma, pvma); anon_vma_fork()
333 if (vma->anon_vma) anon_vma_fork()
357 vma->anon_vma = anon_vma; anon_vma_fork()
359 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_fork()
368 unlink_anon_vmas(vma); anon_vma_fork()
372 void unlink_anon_vmas(struct vm_area_struct *vma) unlink_anon_vmas() argument
381 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { unlink_anon_vmas()
399 if (vma->anon_vma) unlink_anon_vmas()
400 vma->anon_vma->degree--; unlink_anon_vmas()
408 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { unlink_anon_vmas()
569 * At what user virtual address is page expected in @vma?
572 __vma_address(struct page *page, struct vm_area_struct *vma) __vma_address() argument
575 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); __vma_address()
579 vma_address(struct page *page, struct vm_area_struct *vma) vma_address() argument
581 unsigned long address = __vma_address(page, vma); vma_address()
583 /* page should be within @vma mapping range */ vma_address()
584 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); vma_address()
691 * At what user virtual address is page expected in vma?
692 * Caller should check the page is actually part of the vma.
694 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) page_address_in_vma() argument
703 if (!vma->anon_vma || !page__anon_vma || page_address_in_vma()
704 vma->anon_vma->root != page__anon_vma->root) page_address_in_vma()
707 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) page_address_in_vma()
711 address = __vma_address(page, vma); page_address_in_vma()
712 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) page_address_in_vma()
797 * @vma: the VMA to test
803 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) page_mapped_in_vma() argument
809 address = __vma_address(page, vma); page_mapped_in_vma()
810 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) page_mapped_in_vma()
812 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); page_mapped_in_vma()
829 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, page_referenced_one() argument
832 struct mm_struct *mm = vma->vm_mm; page_referenced_one()
849 if (vma->vm_flags & VM_LOCKED) { page_referenced_one()
856 if (pmdp_clear_flush_young_notify(vma, address, pmd)) page_referenced_one()
870 if (vma->vm_flags & VM_LOCKED) { page_referenced_one()
876 if (ptep_clear_flush_young_notify(vma, address, pte)) { page_referenced_one()
884 if (likely(!(vma->vm_flags & VM_SEQ_READ))) page_referenced_one()
897 pra->vm_flags |= vma->vm_flags; page_referenced_one()
907 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) invalid_page_referenced_vma() argument
912 if (!mm_match_cgroup(vma->vm_mm, memcg)) invalid_page_referenced_vma()
923 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
976 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, page_mkclean_one() argument
979 struct mm_struct *mm = vma->vm_mm; page_mkclean_one()
992 flush_cache_page(vma, address, pte_pfn(*pte)); page_mkclean_one()
993 entry = ptep_clear_flush(vma, address, pte); page_mkclean_one()
1010 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) invalid_mkclean_vma() argument
1012 if (vma->vm_flags & VM_SHARED) invalid_mkclean_vma()
1046 * @vma: the vma the page belongs to
1055 struct vm_area_struct *vma, unsigned long address) page_move_anon_rmap()
1057 struct anon_vma *anon_vma = vma->anon_vma; page_move_anon_rmap()
1060 VM_BUG_ON_VMA(!anon_vma, vma); page_move_anon_rmap()
1061 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); page_move_anon_rmap()
1075 * @vma: VM area to add page to.
1080 struct vm_area_struct *vma, unsigned long address, int exclusive) __page_set_anon_rmap()
1082 struct anon_vma *anon_vma = vma->anon_vma; __page_set_anon_rmap()
1090 * If the page isn't exclusively mapped into this vma, __page_set_anon_rmap()
1099 page->index = linear_page_index(vma, address); __page_set_anon_rmap()
1105 * @vma: the vm area in which the mapping is added
1109 struct vm_area_struct *vma, unsigned long address) __page_check_anon_rmap()
1124 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); __page_check_anon_rmap()
1125 BUG_ON(page->index != linear_page_index(vma, address)); __page_check_anon_rmap()
1132 * @vma: the vm area in which the mapping is added
1141 struct vm_area_struct *vma, unsigned long address) page_add_anon_rmap()
1143 do_page_add_anon_rmap(page, vma, address, 0); page_add_anon_rmap()
1152 struct vm_area_struct *vma, unsigned long address, int exclusive) do_page_add_anon_rmap()
1172 /* address might be in next vma when migration races vma_adjust */ do_page_add_anon_rmap()
1174 __page_set_anon_rmap(page, vma, address, exclusive); do_page_add_anon_rmap()
1176 __page_check_anon_rmap(page, vma, address); do_page_add_anon_rmap()
1182 * @vma: the vm area in which the mapping is added
1190 struct vm_area_struct *vma, unsigned long address) page_add_new_anon_rmap()
1192 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); page_add_new_anon_rmap()
1199 __page_set_anon_rmap(page, vma, address, 1); page_add_new_anon_rmap()
1297 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, try_to_unmap_one() argument
1300 struct mm_struct *mm = vma->vm_mm; try_to_unmap_one()
1308 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) try_to_unmap_one()
1321 if (vma->vm_flags & VM_LOCKED) { try_to_unmap_one()
1331 if (ptep_clear_flush_young_notify(vma, address, pte)) { try_to_unmap_one()
1338 flush_cache_page(vma, address, page_to_pfn(page)); try_to_unmap_one()
1351 pteval = ptep_clear_flush(vma, address, pte); try_to_unmap_one()
1434 bool is_vma_temporary_stack(struct vm_area_struct *vma) is_vma_temporary_stack() argument
1436 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); is_vma_temporary_stack()
1441 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == is_vma_temporary_stack()
1448 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) invalid_migration_vma() argument
1450 return is_vma_temporary_stack(vma); invalid_migration_vma()
1512 * SWAP_AGAIN - no vma is holding page mlocked, or,
1513 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
1571 * Find all the mappings of a page using the mapping pointer and the vma chains
1574 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1576 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1592 struct vm_area_struct *vma = avc->vma; rmap_walk_anon() local
1593 unsigned long address = vma_address(page, vma); rmap_walk_anon()
1597 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) rmap_walk_anon()
1600 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_anon()
1615 * Find all the mappings of a page using the mapping pointer and the vma chains
1618 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1620 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1627 struct vm_area_struct *vma; rmap_walk_file() local
1643 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { rmap_walk_file()
1644 unsigned long address = vma_address(page, vma); rmap_walk_file()
1648 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) rmap_walk_file()
1651 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_file()
1680 struct vm_area_struct *vma, unsigned long address, int exclusive) __hugepage_set_anon_rmap()
1682 struct anon_vma *anon_vma = vma->anon_vma; __hugepage_set_anon_rmap()
1693 page->index = linear_page_index(vma, address); __hugepage_set_anon_rmap()
1697 struct vm_area_struct *vma, unsigned long address) hugepage_add_anon_rmap()
1699 struct anon_vma *anon_vma = vma->anon_vma; hugepage_add_anon_rmap()
1704 /* address might be in next vma when migration races vma_adjust */ hugepage_add_anon_rmap()
1707 __hugepage_set_anon_rmap(page, vma, address, 0); hugepage_add_anon_rmap()
1711 struct vm_area_struct *vma, unsigned long address) hugepage_add_new_anon_rmap()
1713 BUG_ON(address < vma->vm_start || address >= vma->vm_end); hugepage_add_new_anon_rmap()
1715 __hugepage_set_anon_rmap(page, vma, address, 1); hugepage_add_new_anon_rmap()
1054 page_move_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_move_anon_rmap() argument
1079 __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) __page_set_anon_rmap() argument
1108 __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) __page_check_anon_rmap() argument
1140 page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_add_anon_rmap() argument
1151 do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) do_page_add_anon_rmap() argument
1189 page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_add_new_anon_rmap() argument
1679 __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) __hugepage_set_anon_rmap() argument
1696 hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) hugepage_add_anon_rmap() argument
1710 hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) hugepage_add_new_anon_rmap() argument
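The rmap.c hits cover anon_vma setup (anon_vma_prepare(), anon_vma_fork(), anon_vma_chain_link()) and the reverse-map walkers that translate a page back to an address in each vma. Userspace cannot call any of this directly; the hedged sketch below merely provokes it, since forking and then writing a private anonymous page relies on the anon_vma chains built at fork time and on the rmap when the COW copy is installed:

/* cow_fork.c - fork and write a private anonymous page; the parent/child
 * anon_vma chains built by anon_vma_fork() back the resulting COW faults.
 * Illustrative only - nothing here calls mm/rmap.c directly. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	strcpy(p, "parent");	/* first touch: page_add_new_anon_rmap() */

	pid_t pid = fork();	/* dup_mmap() -> anon_vma_fork() for this vma */
	if (pid < 0) { perror("fork"); return 1; }
	if (pid == 0) {
		strcpy(p, "child");	/* write fault -> COW via do_wp_page() */
		printf("child sees: %s\n", p);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent still sees: %s\n", p);
	munmap(p, page);
	return 0;
}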
H A Dhuge_memory.c66 * it would have happened if the vma was large enough during page
699 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) maybe_pmd_mkwrite() argument
701 if (likely(vma->vm_flags & VM_WRITE)) maybe_pmd_mkwrite()
715 struct vm_area_struct *vma, __do_huge_pmd_anonymous_page()
758 if (userfaultfd_missing(vma)) { __do_huge_pmd_anonymous_page()
765 ret = handle_userfault(vma, address, flags, __do_huge_pmd_anonymous_page()
771 entry = mk_huge_pmd(page, vma->vm_page_prot); __do_huge_pmd_anonymous_page()
772 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); __do_huge_pmd_anonymous_page()
773 page_add_new_anon_rmap(page, vma, haddr); __do_huge_pmd_anonymous_page()
775 lru_cache_add_active_or_unevictable(page, vma); __do_huge_pmd_anonymous_page()
794 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, set_huge_zero_page()
800 entry = mk_pmd(zero_page, vma->vm_page_prot); set_huge_zero_page()
808 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_anonymous_page() argument
816 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) do_huge_pmd_anonymous_page()
818 if (unlikely(anon_vma_prepare(vma))) do_huge_pmd_anonymous_page()
820 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) do_huge_pmd_anonymous_page()
842 if (userfaultfd_missing(vma)) { do_huge_pmd_anonymous_page()
844 ret = handle_userfault(vma, address, flags, do_huge_pmd_anonymous_page()
848 set_huge_zero_page(pgtable, mm, vma, do_huge_pmd_anonymous_page()
862 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); do_huge_pmd_anonymous_page()
863 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); do_huge_pmd_anonymous_page()
868 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, do_huge_pmd_anonymous_page()
872 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, insert_pfn_pmd() argument
875 struct mm_struct *mm = vma->vm_mm; insert_pfn_pmd()
884 entry = maybe_pmd_mkwrite(entry, vma); insert_pfn_pmd()
887 update_mmu_cache_pmd(vma, addr, pmd); insert_pfn_pmd()
892 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, vmf_insert_pfn_pmd() argument
895 pgprot_t pgprot = vma->vm_page_prot; vmf_insert_pfn_pmd()
901 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); vmf_insert_pfn_pmd()
902 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == vmf_insert_pfn_pmd()
904 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); vmf_insert_pfn_pmd()
905 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); vmf_insert_pfn_pmd()
907 if (addr < vma->vm_start || addr >= vma->vm_end) vmf_insert_pfn_pmd()
909 if (track_pfn_insert(vma, &pgprot, pfn)) vmf_insert_pfn_pmd()
911 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); vmf_insert_pfn_pmd()
917 struct vm_area_struct *vma) copy_huge_pmd()
953 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, copy_huge_pmd()
965 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ copy_huge_pmd()
989 struct vm_area_struct *vma, huge_pmd_set_accessed()
1004 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) huge_pmd_set_accessed()
1005 update_mmu_cache_pmd(vma, address, pmd); huge_pmd_set_accessed()
1043 struct vm_area_struct *vma, do_huge_pmd_wp_page_fallback()
1068 vma, address, page_to_nid(page)); do_huge_pmd_wp_page_fallback()
1089 haddr + PAGE_SIZE * i, vma); do_huge_pmd_wp_page_fallback()
1103 pmdp_huge_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page_fallback()
1111 entry = mk_pte(pages[i], vma->vm_page_prot); do_huge_pmd_wp_page_fallback()
1112 entry = maybe_mkwrite(pte_mkdirty(entry), vma); do_huge_pmd_wp_page_fallback()
1115 page_add_new_anon_rmap(pages[i], vma, haddr); do_huge_pmd_wp_page_fallback()
1117 lru_cache_add_active_or_unevictable(pages[i], vma); do_huge_pmd_wp_page_fallback()
1151 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_wp_page() argument
1164 VM_BUG_ON_VMA(!vma->anon_vma, vma); do_huge_pmd_wp_page()
1177 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); do_huge_pmd_wp_page()
1178 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) do_huge_pmd_wp_page()
1179 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1186 if (transparent_hugepage_enabled(vma) && do_huge_pmd_wp_page()
1188 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); do_huge_pmd_wp_page()
1189 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); do_huge_pmd_wp_page()
1195 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1198 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, do_huge_pmd_wp_page()
1216 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1227 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); do_huge_pmd_wp_page()
1244 entry = mk_huge_pmd(new_page, vma->vm_page_prot); do_huge_pmd_wp_page()
1245 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); do_huge_pmd_wp_page()
1246 pmdp_huge_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page()
1247 page_add_new_anon_rmap(new_page, vma, haddr); do_huge_pmd_wp_page()
1249 lru_cache_add_active_or_unevictable(new_page, vma); do_huge_pmd_wp_page()
1251 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1272 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, follow_trans_huge_pmd() argument
1277 struct mm_struct *mm = vma->vm_mm; follow_trans_huge_pmd()
1306 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, follow_trans_huge_pmd()
1308 update_mmu_cache_pmd(vma, addr, pmd); follow_trans_huge_pmd()
1310 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { follow_trans_huge_pmd()
1328 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_numa_page() argument
1343 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); do_huge_pmd_numa_page()
1372 if (!(vma->vm_flags & VM_WRITE)) do_huge_pmd_numa_page()
1380 target_nid = mpol_misplaced(page, vma, haddr); do_huge_pmd_numa_page()
1424 migrated = migrate_misplaced_transhuge_page(mm, vma, do_huge_pmd_numa_page()
1436 pmd = pmd_modify(pmd, vma->vm_page_prot); do_huge_pmd_numa_page()
1441 update_mmu_cache_pmd(vma, addr, pmdp); do_huge_pmd_numa_page()
1456 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, zap_huge_pmd() argument
1462 if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1) zap_huge_pmd()
1473 if (vma_is_dax(vma)) { zap_huge_pmd()
1496 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, move_huge_pmd() argument
1505 struct mm_struct *mm = vma->vm_mm; move_huge_pmd()
1526 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); move_huge_pmd()
1554 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, change_huge_pmd() argument
1557 struct mm_struct *mm = vma->vm_mm; change_huge_pmd()
1561 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { change_huge_pmd()
1598 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, __pmd_trans_huge_lock() argument
1601 *ptl = pmd_lock(vma->vm_mm, pmd); __pmd_trans_huge_lock()
1605 wait_split_huge_page(vma->anon_vma, pmd); __pmd_trans_huge_lock()
1672 struct vm_area_struct *vma, __split_huge_page_splitting()
1675 struct mm_struct *mm = vma->vm_mm; __split_huge_page_splitting()
1694 pmdp_splitting_flush(vma, address, pmd); __split_huge_page_splitting()
1824 struct vm_area_struct *vma, __split_huge_page_map()
1827 struct mm_struct *mm = vma->vm_mm; __split_huge_page_map()
1851 entry = mk_pte(page + i, vma->vm_page_prot); __split_huge_page_map()
1852 entry = maybe_mkwrite(pte_mkdirty(entry), vma); __split_huge_page_map()
1890 pmdp_invalidate(vma, address, pmd); __split_huge_page_map()
1913 struct vm_area_struct *vma = avc->vma; __split_huge_page() local
1914 unsigned long addr = vma_address(page, vma); __split_huge_page()
1915 BUG_ON(is_vma_temporary_stack(vma)); __split_huge_page()
1916 mapcount += __split_huge_page_splitting(page, vma, addr); __split_huge_page()
1938 struct vm_area_struct *vma = avc->vma; __split_huge_page() local
1939 unsigned long addr = vma_address(page, vma); __split_huge_page()
1940 BUG_ON(is_vma_temporary_stack(vma)); __split_huge_page()
1941 mapcount2 += __split_huge_page_map(page, vma, addr); __split_huge_page()
1995 int hugepage_madvise(struct vm_area_struct *vma, hugepage_madvise() argument
2006 if (mm_has_pgste(vma->vm_mm)) hugepage_madvise()
2017 * If the vma become good for khugepaged to scan, hugepage_madvise()
2021 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) hugepage_madvise()
2034 * this vma even if we leave the mm registered in khugepaged if hugepage_madvise()
2127 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, khugepaged_enter_vma_merge() argument
2131 if (!vma->anon_vma) khugepaged_enter_vma_merge()
2137 if (vma->vm_ops || (vm_flags & VM_NO_THP)) khugepaged_enter_vma_merge()
2140 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; khugepaged_enter_vma_merge()
2141 hend = vma->vm_end & HPAGE_PMD_MASK; khugepaged_enter_vma_merge()
2143 return khugepaged_enter(vma, vm_flags); khugepaged_enter_vma_merge()
2196 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, __collapse_huge_page_isolate() argument
2209 if (!userfaultfd_armed(vma) && __collapse_huge_page_isolate()
2217 page = vm_normal_page(vma, address, pteval); __collapse_huge_page_isolate()
2272 mmu_notifier_test_young(vma->vm_mm, address)) __collapse_huge_page_isolate()
2283 struct vm_area_struct *vma, __collapse_huge_page_copy()
2294 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); __collapse_huge_page_copy()
2304 pte_clear(vma->vm_mm, address, _pte); __collapse_huge_page_copy()
2309 copy_user_highpage(page, src_page, address, vma); __collapse_huge_page_copy()
2322 pte_clear(vma->vm_mm, address, _pte); __collapse_huge_page_copy()
2422 * that. We will recheck the vma after taking it again in write mode. khugepaged_alloc_page()
2490 static bool hugepage_vma_check(struct vm_area_struct *vma) hugepage_vma_check() argument
2492 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || hugepage_vma_check()
2493 (vma->vm_flags & VM_NOHUGEPAGE)) hugepage_vma_check()
2496 if (!vma->anon_vma || vma->vm_ops) hugepage_vma_check()
2498 if (is_vma_temporary_stack(vma)) hugepage_vma_check()
2500 return !(vma->vm_flags & VM_NO_THP); hugepage_vma_check()
2506 struct vm_area_struct *vma, collapse_huge_page()
2545 vma = find_vma(mm, address); collapse_huge_page()
2546 if (!vma) collapse_huge_page()
2548 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; collapse_huge_page()
2549 hend = vma->vm_end & HPAGE_PMD_MASK; collapse_huge_page()
2552 if (!hugepage_vma_check(vma)) collapse_huge_page()
2558 anon_vma_lock_write(vma->anon_vma); collapse_huge_page()
2573 _pmd = pmdp_collapse_flush(vma, address, pmd); collapse_huge_page()
2578 isolated = __collapse_huge_page_isolate(vma, address, pte); collapse_huge_page()
2592 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2600 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2602 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); collapse_huge_page()
2607 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); collapse_huge_page()
2608 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); collapse_huge_page()
2619 page_add_new_anon_rmap(new_page, vma, address); collapse_huge_page()
2621 lru_cache_add_active_or_unevictable(new_page, vma); collapse_huge_page()
2624 update_mmu_cache_pmd(vma, address, pmd); collapse_huge_page()
2640 struct vm_area_struct *vma, khugepaged_scan_pmd()
2665 if (!userfaultfd_armed(vma) && khugepaged_scan_pmd()
2676 page = vm_normal_page(vma, _address, pteval); khugepaged_scan_pmd()
2701 mmu_notifier_test_young(vma->vm_mm, address)) khugepaged_scan_pmd()
2711 collapse_huge_page(mm, address, hpage, vma, node); khugepaged_scan_pmd()
2747 struct vm_area_struct *vma; variable in typeref:struct:vm_area_struct
2766 vma = NULL;
2768 vma = find_vma(mm, khugepaged_scan.address);
2771 for (; vma; vma = vma->vm_next) {
2779 if (!hugepage_vma_check(vma)) {
2784 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2785 hend = vma->vm_end & HPAGE_PMD_MASK;
2803 ret = khugepaged_scan_pmd(mm, vma,
2826 if (khugepaged_test_exit(mm) || !vma) {
2931 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, __split_huge_zero_page_pmd() argument
2934 struct mm_struct *mm = vma->vm_mm; __split_huge_zero_page_pmd()
2939 pmdp_huge_clear_flush_notify(vma, haddr, pmd); __split_huge_zero_page_pmd()
2947 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); __split_huge_zero_page_pmd()
2959 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, __split_huge_page_pmd() argument
2964 struct mm_struct *mm = vma->vm_mm; __split_huge_page_pmd()
2969 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); __split_huge_page_pmd()
2978 if (vma_is_dax(vma)) { __split_huge_page_pmd()
2979 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); __split_huge_page_pmd()
2983 __split_huge_zero_page_pmd(vma, haddr, pmd); __split_huge_page_pmd()
3011 struct vm_area_struct *vma; split_huge_page_pmd_mm() local
3013 vma = find_vma(mm, address); split_huge_page_pmd_mm()
3014 BUG_ON(vma == NULL); split_huge_page_pmd_mm()
3015 split_huge_page_pmd(vma, address, pmd); split_huge_page_pmd_mm()
3045 void vma_adjust_trans_huge(struct vm_area_struct *vma, vma_adjust_trans_huge() argument
3056 (start & HPAGE_PMD_MASK) >= vma->vm_start && vma_adjust_trans_huge()
3057 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) vma_adjust_trans_huge()
3058 split_huge_page_address(vma->vm_mm, start); vma_adjust_trans_huge()
3066 (end & HPAGE_PMD_MASK) >= vma->vm_start && vma_adjust_trans_huge()
3067 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) vma_adjust_trans_huge()
3068 split_huge_page_address(vma->vm_mm, end); vma_adjust_trans_huge()
3071 * If we're also updating the vma->vm_next->vm_start, if the new vma_adjust_trans_huge()
3076 struct vm_area_struct *next = vma->vm_next; vma_adjust_trans_huge()
714 __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, struct page *page, gfp_t gfp, unsigned int flags) __do_huge_pmd_anonymous_page() argument
793 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) set_huge_zero_page() argument
915 copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) copy_huge_pmd() argument
988 huge_pmd_set_accessed(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, int dirty) huge_pmd_set_accessed() argument
1042 do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) do_huge_pmd_wp_page_fallback() argument
1671 __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address) __split_huge_page_splitting() argument
1823 __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address) __split_huge_page_map() argument
2282 __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl) __collapse_huge_page_copy() argument
2503 collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) collapse_huge_page() argument
2639 khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) khugepaged_scan_pmd() argument
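The huge_memory.c hits follow the transparent-huge-page fault path (do_huge_pmd_anonymous_page(), do_huge_pmd_wp_page()) and khugepaged's collapse logic. Below is a hedged userspace sketch that steers an anonymous vma toward that path with madvise(MADV_HUGEPAGE); it assumes CONFIG_TRANSPARENT_HUGEPAGE and a 2 MiB PMD size, which is an x86_64-specific assumption:

/* thp_hint.c - ask for THP on an aligned anonymous mapping so the first
 * touch can go through do_huge_pmd_anonymous_page().  Sketch only; the
 * 2 MiB size below assumes the usual x86_64 HPAGE_PMD_SIZE. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)

int main(void)
{
	/* Over-allocate so we can pick a 2 MiB aligned start address. */
	size_t len = 4 * HPAGE_SIZE;
	char *raw = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED) { perror("mmap"); return 1; }

	char *aligned = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1)
				 & ~(uintptr_t)(HPAGE_SIZE - 1));

	/* VM_HUGEPAGE makes hugepage_vma_check()/transparent_hugepage_enabled()
	 * willing to use a huge pmd for this vma, subject to the sysfs policy. */
	if (madvise(aligned, 2 * HPAGE_SIZE, MADV_HUGEPAGE) != 0)
		perror("madvise(MADV_HUGEPAGE)");

	memset(aligned, 0, 2 * HPAGE_SIZE);	/* first-touch fault */
	printf("touched 2 huge-page-sized extents at %p\n", (void *)aligned);
	munmap(raw, len);
	return 0;
}

Whether a huge pmd is actually used also depends on /sys/kernel/mm/transparent_hugepage/enabled; checking the AnonHugePages field in /proc/self/smaps afterwards is the easiest way to confirm.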
H A Dgup.c21 static struct page *no_page_table(struct vm_area_struct *vma, no_page_table() argument
32 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) no_page_table()
37 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, follow_pfn_pte() argument
52 set_pte_at(vma->vm_mm, address, pte, entry); follow_pfn_pte()
53 update_mmu_cache(vma, address, pte); follow_pfn_pte()
61 static struct page *follow_page_pte(struct vm_area_struct *vma, follow_page_pte() argument
64 struct mm_struct *mm = vma->vm_mm; follow_page_pte()
71 return no_page_table(vma, flags); follow_page_pte()
100 page = vm_normal_page(vma, address, pte); follow_page_pte()
113 ret = follow_pfn_pte(vma, address, ptep, flags); follow_page_pte()
132 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { follow_page_pte()
161 return no_page_table(vma, flags); follow_page_pte()
166 * @vma: vm_area_struct mapping @address
177 struct page *follow_page_mask(struct vm_area_struct *vma, follow_page_mask() argument
186 struct mm_struct *mm = vma->vm_mm; follow_page_mask()
198 return no_page_table(vma, flags); follow_page_mask()
202 return no_page_table(vma, flags); follow_page_mask()
203 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { follow_page_mask()
207 return no_page_table(vma, flags); follow_page_mask()
210 return no_page_table(vma, flags); follow_page_mask()
214 return no_page_table(vma, flags); follow_page_mask()
215 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { follow_page_mask()
219 return no_page_table(vma, flags); follow_page_mask()
222 return no_page_table(vma, flags); follow_page_mask()
225 split_huge_page_pmd(vma, address, pmd); follow_page_mask()
226 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
232 wait_split_huge_page(vma->anon_vma, pmd); follow_page_mask()
234 page = follow_trans_huge_pmd(vma, address, follow_page_mask()
243 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
247 unsigned int gup_flags, struct vm_area_struct **vma, get_gate_page()
273 *vma = get_gate_vma(mm); get_gate_page()
276 *page = vm_normal_page(*vma, address, *pte); get_gate_page()
295 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, faultin_page() argument
298 struct mm_struct *mm = vma->vm_mm; faultin_page()
307 (stack_guard_page_start(vma, address) || faultin_page()
308 stack_guard_page_end(vma, address + PAGE_SIZE))) faultin_page()
321 ret = handle_mm_fault(mm, vma, address, fault_flags); faultin_page()
354 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) faultin_page()
359 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) check_vma_flags() argument
361 vm_flags_t vm_flags = vma->vm_flags; check_vma_flags()
372 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could check_vma_flags()
388 * Is there actually any vma we can reach here which does not check_vma_flags()
460 struct vm_area_struct *vma = NULL; __get_user_pages() local
480 /* first iteration or cross vma bound */ __get_user_pages()
481 if (!vma || start >= vma->vm_end) { __get_user_pages()
482 vma = find_extend_vma(mm, start); __get_user_pages()
483 if (!vma && in_gate_area(mm, start)) { __get_user_pages()
486 gup_flags, &vma, __get_user_pages()
494 if (!vma || check_vma_flags(vma, gup_flags)) __get_user_pages()
496 if (is_vm_hugetlb_page(vma)) { __get_user_pages()
497 i = follow_hugetlb_page(mm, vma, pages, vmas, __get_user_pages()
511 page = follow_page_mask(vma, start, foll_flags, &page_mask); __get_user_pages()
514 ret = faultin_page(tsk, vma, start, &foll_flags, __get_user_pages()
540 flush_anon_page(vma, page, start); __get_user_pages()
546 vmas[i] = vma; __get_user_pages()
590 struct vm_area_struct *vma; fixup_user_fault() local
594 vma = find_extend_vma(mm, address); fixup_user_fault()
595 if (!vma || address < vma->vm_start) fixup_user_fault()
599 if (!(vm_flags & vma->vm_flags)) fixup_user_fault()
602 ret = handle_mm_fault(mm, vma, address, fault_flags); fixup_user_fault()
865 * populate_vma_page_range() - populate a range of pages in the vma.
866 * @vma: target vma
875 * vma->vm_mm->mmap_sem must be held.
883 long populate_vma_page_range(struct vm_area_struct *vma, populate_vma_page_range() argument
886 struct mm_struct *mm = vma->vm_mm; populate_vma_page_range()
892 VM_BUG_ON_VMA(start < vma->vm_start, vma); populate_vma_page_range()
893 VM_BUG_ON_VMA(end > vma->vm_end, vma); populate_vma_page_range()
897 if (vma->vm_flags & VM_LOCKONFAULT) populate_vma_page_range()
905 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) populate_vma_page_range()
912 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) populate_vma_page_range()
934 struct vm_area_struct *vma = NULL; __mm_populate() local
950 vma = find_vma(mm, nstart); __mm_populate()
951 } else if (nstart >= vma->vm_end) __mm_populate()
952 vma = vma->vm_next; __mm_populate()
953 if (!vma || vma->vm_start >= end) __mm_populate()
959 nend = min(end, vma->vm_end); __mm_populate()
960 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) __mm_populate()
962 if (nstart < vma->vm_start) __mm_populate()
963 nstart = vma->vm_start; __mm_populate()
966 * double checks the vma flags, so that it won't mlock pages __mm_populate()
967 * if the vma was already munlocked. __mm_populate()
969 ret = populate_vma_page_range(vma, nstart, nend, &locked); __mm_populate()
1002 struct vm_area_struct *vma; get_dump_page() local
1006 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, get_dump_page()
1009 flush_cache_page(vma, addr, page_to_pfn(page)); get_dump_page()
246 get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) get_gate_page() argument
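The gup.c hits cover follow_page_mask() and __get_user_pages(), the machinery that resolves and pins pages in a target mm, which __access_remote_vm() above uses for things like /proc/<pid>/mem and ptrace. A small sketch (illustrative; process_vm_readv(2) needs Linux 3.2+ and glibc 2.15+) that reads the caller's own memory and ends up in the same get_user_pages() machinery:

/* remote_read.c - read this process's own memory via process_vm_readv(2),
 * which pins the target pages with the get_user_pages() machinery listed
 * above.  Illustrative sketch; needs Linux >= 3.2 and glibc >= 2.15. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[64] = "hello from the target mapping";
	char dst[64] = { 0 };

	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}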
H A Dmincore.c85 struct vm_area_struct *vma, unsigned char *vec) __mincore_unmapped_range()
90 if (vma->vm_file) { __mincore_unmapped_range()
93 pgoff = linear_page_index(vma, addr); __mincore_unmapped_range()
95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); __mincore_unmapped_range()
107 walk->vma, walk->private); mincore_unmapped_range()
115 struct vm_area_struct *vma = walk->vma; mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { mincore_pte_range()
127 __mincore_unmapped_range(addr, end, vma, vec); mincore_pte_range()
137 vma, vec); mincore_pte_range()
175 struct vm_area_struct *vma; do_mincore() local
185 vma = find_vma(current->mm, addr); do_mincore()
186 if (!vma || addr < vma->vm_start) do_mincore()
188 mincore_walk.mm = vma->vm_mm; do_mincore()
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); do_mincore()
84 __mincore_unmapped_range(unsigned long addr, unsigned long end, struct vm_area_struct *vma, unsigned char *vec) __mincore_unmapped_range() argument
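do_mincore() and mincore_pte_range() above implement mincore(2), walking the vma to report which pages are resident. A short sketch (illustrative only) that touches half of an anonymous mapping and then queries residency:

/* mincore_demo.c - touch half of a mapping, then ask mincore(2) which
 * pages are resident; this is the syscall served by do_mincore() above. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	size_t npages = 8;
	char *p = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }

	memset(p, 1, (npages / 2) * page);	/* fault in the first half */

	unsigned char vec[8];
	if (mincore(p, npages * page, vec) != 0) { perror("mincore"); return 1; }

	for (size_t i = 0; i < npages; i++)
		printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

	munmap(p, npages * page);
	return 0;
}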
H A Dmempolicy.c438 * Rebind each vma in mm to new nodemask.
445 struct vm_area_struct *vma; mpol_rebind_mm() local
448 for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_mm()
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); mpol_rebind_mm()
488 struct vm_area_struct *vma = walk->vma; queue_pages_pte_range() local
496 split_huge_page_pmd(vma, addr, pmd); queue_pages_pte_range()
504 page = vm_normal_page(vma, addr, *pte); queue_pages_pte_range()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); queue_pages_hugetlb()
567 unsigned long change_prot_numa(struct vm_area_struct *vma, change_prot_numa() argument
572 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); change_prot_numa()
579 static unsigned long change_prot_numa(struct vm_area_struct *vma, change_prot_numa() argument
589 struct vm_area_struct *vma = walk->vma; queue_pages_test_walk() local
591 unsigned long endvma = vma->vm_end; queue_pages_test_walk()
594 if (vma->vm_flags & VM_PFNMAP) queue_pages_test_walk()
599 if (vma->vm_start > start) queue_pages_test_walk()
600 start = vma->vm_start; queue_pages_test_walk()
603 if (!vma->vm_next && vma->vm_end < end) queue_pages_test_walk()
605 if (qp->prev && qp->prev->vm_end < vma->vm_start) queue_pages_test_walk()
609 qp->prev = vma; queue_pages_test_walk()
613 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) queue_pages_test_walk()
614 change_prot_numa(vma, start, endvma); queue_pages_test_walk()
620 vma_migratable(vma))) queue_pages_test_walk()
621 /* queue pages from current vma */ queue_pages_test_walk()
659 static int vma_replace_policy(struct vm_area_struct *vma, vma_replace_policy() argument
666 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma_replace_policy()
667 vma->vm_start, vma->vm_end, vma->vm_pgoff, vma_replace_policy()
668 vma->vm_ops, vma->vm_file, vma_replace_policy()
669 vma->vm_ops ? vma->vm_ops->set_policy : NULL); vma_replace_policy()
675 if (vma->vm_ops && vma->vm_ops->set_policy) { vma_replace_policy()
676 err = vma->vm_ops->set_policy(vma, new); vma_replace_policy()
681 old = vma->vm_policy; vma_replace_policy()
682 vma->vm_policy = new; /* protected by mmap_sem */ vma_replace_policy()
697 struct vm_area_struct *vma; mbind_range() local
703 vma = find_vma(mm, start); mbind_range()
704 if (!vma || vma->vm_start > start) mbind_range()
707 prev = vma->vm_prev; mbind_range()
708 if (start > vma->vm_start) mbind_range()
709 prev = vma; mbind_range()
711 for (; vma && vma->vm_start < end; prev = vma, vma = next) { mbind_range()
712 next = vma->vm_next; mbind_range()
713 vmstart = max(start, vma->vm_start); mbind_range()
714 vmend = min(end, vma->vm_end); mbind_range()
716 if (mpol_equal(vma_policy(vma), new_pol)) mbind_range()
719 pgoff = vma->vm_pgoff + mbind_range()
720 ((vmstart - vma->vm_start) >> PAGE_SHIFT); mbind_range()
721 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, mbind_range()
722 vma->anon_vma, vma->vm_file, pgoff, mbind_range()
723 new_pol, vma->vm_userfaultfd_ctx); mbind_range()
725 vma = prev; mbind_range()
726 next = vma->vm_next; mbind_range()
727 if (mpol_equal(vma_policy(vma), new_pol)) mbind_range()
729 /* vma_merge() joined vma && vma->next, case 8 */ mbind_range()
732 if (vma->vm_start != vmstart) { mbind_range()
733 err = split_vma(vma->vm_mm, vma, vmstart, 1); mbind_range()
737 if (vma->vm_end != vmend) { mbind_range()
738 err = split_vma(vma->vm_mm, vma, vmend, 0); mbind_range()
743 err = vma_replace_policy(vma, new_pol); mbind_range()
835 struct vm_area_struct *vma = NULL; do_get_mempolicy() local
855 * vma/shared policy at addr is NULL. We do_get_mempolicy()
859 vma = find_vma_intersection(mm, addr, addr+1); do_get_mempolicy()
860 if (!vma) { do_get_mempolicy()
864 if (vma->vm_ops && vma->vm_ops->get_policy) do_get_mempolicy()
865 pol = vma->vm_ops->get_policy(vma, addr); do_get_mempolicy()
867 pol = vma->vm_policy; do_get_mempolicy()
897 if (vma) { do_get_mempolicy()
899 vma = NULL; do_get_mempolicy()
915 if (vma) do_get_mempolicy()
1088 * Allocate a new page for page migration based on vma policy.
1089 * Start by assuming the page is mapped by the same vma as contains @start.
1096 struct vm_area_struct *vma; new_page() local
1099 vma = find_vma(current->mm, start); new_page()
1100 while (vma) { new_page()
1101 address = page_address_in_vma(page, vma); new_page()
1104 vma = vma->vm_next; new_page()
1108 BUG_ON(!vma); new_page()
1109 return alloc_huge_page_noerr(vma, address, 1); new_page()
1112 * if !vma, alloc_page_vma() will use task or system default policy new_page()
1114 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); new_page()
1541 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, __get_vma_policy() argument
1546 if (vma) { __get_vma_policy()
1547 if (vma->vm_ops && vma->vm_ops->get_policy) { __get_vma_policy()
1548 pol = vma->vm_ops->get_policy(vma, addr); __get_vma_policy()
1549 } else if (vma->vm_policy) { __get_vma_policy()
1550 pol = vma->vm_policy; __get_vma_policy()
1554 * a pseudo vma whose vma->vm_ops=NULL. Take a reference __get_vma_policy()
1567 * get_vma_policy(@vma, @addr)
1568 * @vma: virtual memory area whose policy is sought
1569 * @addr: address in @vma for shared policy lookup
1578 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, get_vma_policy() argument
1581 struct mempolicy *pol = __get_vma_policy(vma, addr); get_vma_policy()
1589 bool vma_policy_mof(struct vm_area_struct *vma) vma_policy_mof() argument
1593 if (vma->vm_ops && vma->vm_ops->get_policy) { vma_policy_mof()
1596 pol = vma->vm_ops->get_policy(vma, vma->vm_start); vma_policy_mof()
1604 pol = vma->vm_policy; vma_policy_mof()
1735 struct vm_area_struct *vma, unsigned long off) offset_il_node()
1755 struct vm_area_struct *vma, unsigned long addr, int shift) interleave_nid()
1757 if (vma) { interleave_nid()
1768 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); interleave_nid()
1769 off += (addr - vma->vm_start) >> shift; interleave_nid()
1770 return offset_il_node(pol, vma, off); interleave_nid()
1792 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1793 * @vma: virtual memory area whose policy is sought
1794 * @addr: address in @vma for shared policy lookup and interleave policy
1806 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, huge_zonelist() argument
1812 *mpol = get_vma_policy(vma, addr); huge_zonelist()
1816 zl = node_zonelist(interleave_nid(*mpol, vma, addr, huge_zonelist()
1817 huge_page_shift(hstate_vma(vma))), gfp_flags); huge_zonelist()
1946 * @vma: Pointer to VMA or NULL if not available.
1959 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, alloc_pages_vma() argument
1969 pol = get_vma_policy(vma, addr); alloc_pages_vma()
1975 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); alloc_pages_vma()
2234 * @vma: vm area where page mapped
2237 * Lookup current policy node id for vma,addr and "compare to" page's
2245 * Called from fault path where we know the vma and faulting address.
2247 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) mpol_misplaced() argument
2258 BUG_ON(!vma); mpol_misplaced()
2260 pol = get_vma_policy(vma, addr); mpol_misplaced()
2266 BUG_ON(addr >= vma->vm_end); mpol_misplaced()
2267 BUG_ON(addr < vma->vm_start); mpol_misplaced()
2269 pgoff = vma->vm_pgoff; mpol_misplaced()
2270 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; mpol_misplaced()
2271 polnid = offset_il_node(pol, vma, pgoff); mpol_misplaced()
2454 /* Create pseudo-vma that contains just the policy */ mpol_shared_policy_init()
2469 struct vm_area_struct *vma, struct mempolicy *npol) mpol_set_shared_policy()
2473 unsigned long sz = vma_pages(vma); mpol_set_shared_policy()
2476 vma->vm_pgoff, mpol_set_shared_policy()
2482 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); mpol_set_shared_policy()
2486 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); mpol_set_shared_policy()
1734 offset_il_node(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long off) offset_il_node() argument
1754 interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) interleave_nid() argument
2468 mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) mpol_set_shared_policy() argument
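The mempolicy.c hits show how a per-vma NUMA policy is looked up (get_vma_policy(), __get_vma_policy(), do_get_mempolicy()) and installed (mbind_range(), vma_replace_policy()). Below is a hedged sketch of the userspace side, querying the policy in effect for one address with get_mempolicy(2); it assumes the libnuma development headers (<numaif.h>, link with -lnuma) and a kernel built with CONFIG_NUMA:

/* vma_policy.c - query the NUMA policy that applies to one mapping via
 * get_mempolicy(2) with MPOL_F_ADDR, i.e. the do_get_mempolicy() path
 * shown above.  Assumes <numaif.h> from libnuma (compile with -lnuma)
 * and CONFIG_NUMA. */
#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	p[0] = 1;		/* make sure the vma exists and has been touched */

	int mode = -1;
	/* MPOL_F_ADDR: report the policy of the vma containing p, falling
	 * back to the task/default policy when the vma has none. */
	if (get_mempolicy(&mode, NULL, 0, p, MPOL_F_ADDR) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("policy mode for %p: %d (MPOL_DEFAULT is %d)\n",
	       (void *)p, mode, MPOL_DEFAULT);
	munmap(p, page);
	return 0;
}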
H A Dhugetlb.c211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) subpool_vma() argument
213 return subpool_inode(file_inode(vma->vm_file)); subpool_vma()
613 * Convert the address within this vma to the page offset within
617 struct vm_area_struct *vma, unsigned long address) vma_hugecache_offset()
619 return ((address - vma->vm_start) >> huge_page_shift(h)) + vma_hugecache_offset()
620 (vma->vm_pgoff >> huge_page_order(h)); vma_hugecache_offset()
623 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, linear_hugepage_index() argument
626 return vma_hugecache_offset(hstate_vma(vma), vma, address); linear_hugepage_index()
633 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) vma_kernel_pagesize() argument
637 if (!is_vm_hugetlb_page(vma)) vma_kernel_pagesize()
640 hstate = hstate_vma(vma); vma_kernel_pagesize()
653 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) vma_mmu_pagesize() argument
655 return vma_kernel_pagesize(vma); vma_mmu_pagesize()
687 static unsigned long get_vma_private_data(struct vm_area_struct *vma) get_vma_private_data() argument
689 return (unsigned long)vma->vm_private_data; get_vma_private_data()
692 static void set_vma_private_data(struct vm_area_struct *vma, set_vma_private_data() argument
695 vma->vm_private_data = (void *)value; set_vma_private_data()
747 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) vma_resv_map() argument
749 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); vma_resv_map()
750 if (vma->vm_flags & VM_MAYSHARE) { vma_resv_map()
751 struct address_space *mapping = vma->vm_file->f_mapping; vma_resv_map()
757 return (struct resv_map *)(get_vma_private_data(vma) & vma_resv_map()
762 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) set_vma_resv_map() argument
764 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); set_vma_resv_map()
765 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_resv_map()
767 set_vma_private_data(vma, (get_vma_private_data(vma) & set_vma_resv_map()
771 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) set_vma_resv_flags() argument
773 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); set_vma_resv_flags()
774 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_resv_flags()
776 set_vma_private_data(vma, get_vma_private_data(vma) | flags); set_vma_resv_flags()
779 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) is_vma_resv_set() argument
781 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); is_vma_resv_set()
783 return (get_vma_private_data(vma) & flag) != 0; is_vma_resv_set()
787 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) reset_vma_resv_huge_pages() argument
789 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); reset_vma_resv_huge_pages()
790 if (!(vma->vm_flags & VM_MAYSHARE)) reset_vma_resv_huge_pages()
791 vma->vm_private_data = (void *)0; reset_vma_resv_huge_pages()
795 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) vma_has_reserves() argument
797 if (vma->vm_flags & VM_NORESERVE) { vma_has_reserves()
807 if (vma->vm_flags & VM_MAYSHARE && chg == 0) vma_has_reserves()
814 if (vma->vm_flags & VM_MAYSHARE) { vma_has_reserves()
832 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) vma_has_reserves()
876 struct vm_area_struct *vma, dequeue_huge_page_vma()
893 if (!vma_has_reserves(vma, chg) && dequeue_huge_page_vma()
903 zonelist = huge_zonelist(vma, address, dequeue_huge_page_vma()
913 if (!vma_has_reserves(vma, chg)) dequeue_huge_page_vma()
1448 * 1. With vma+addr: we use the VMA's memory policy
1449 * 2. With !vma, but nid=NUMA_NO_NODE: We try to allocate a huge
1452 * 3. With !vma, but nid!=NUMA_NO_NODE. We allocate a huge page
1456 struct vm_area_struct *vma, unsigned long addr, int nid) __hugetlb_alloc_buddy_huge_page()
1471 if (!IS_ENABLED(CONFIG_NUMA) || !vma) { __hugetlb_alloc_buddy_huge_page()
1498 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask); __hugetlb_alloc_buddy_huge_page()
1513 * 'vma' and 'addr' are only for (1). 'nid' is always NUMA_NO_NODE in
1517 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1521 struct vm_area_struct *vma, unsigned long addr, int nid) __alloc_buddy_huge_page()
1534 if (vma || (addr != -1)) { __alloc_buddy_huge_page()
1571 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid); __alloc_buddy_huge_page()
1613 struct vm_area_struct *vma, unsigned long addr) __alloc_buddy_huge_page_with_mpol()
1615 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE); __alloc_buddy_huge_page_with_mpol()
1619 * This allocation function is useful in the context where vma is irrelevant.
1765 * within the vma has an associated reservation. If a reservation is
1785 struct vm_area_struct *vma, unsigned long addr, __vma_reservation_common()
1792 resv = vma_resv_map(vma); __vma_reservation_common()
1796 idx = vma_hugecache_offset(h, vma, addr); __vma_reservation_common()
1812 if (vma->vm_flags & VM_MAYSHARE) __vma_reservation_common()
1819 struct vm_area_struct *vma, unsigned long addr) vma_needs_reservation()
1821 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); vma_needs_reservation()
1825 struct vm_area_struct *vma, unsigned long addr) vma_commit_reservation()
1827 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); vma_commit_reservation()
1831 struct vm_area_struct *vma, unsigned long addr) vma_end_reservation()
1833 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); vma_end_reservation()
1836 struct page *alloc_huge_page(struct vm_area_struct *vma, alloc_huge_page() argument
1839 struct hugepage_subpool *spool = subpool_vma(vma); alloc_huge_page()
1840 struct hstate *h = hstate_vma(vma); alloc_huge_page()
1853 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); alloc_huge_page()
1867 vma_end_reservation(h, vma, addr); alloc_huge_page()
1893 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); alloc_huge_page()
1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); alloc_huge_page()
1899 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { alloc_huge_page()
1912 map_commit = vma_commit_reservation(h, vma, addr); alloc_huge_page()
1935 vma_end_reservation(h, vma, addr); alloc_huge_page()
1944 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, alloc_huge_page_noerr() argument
1947 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); alloc_huge_page_noerr()
2961 static void hugetlb_vm_op_open(struct vm_area_struct *vma) hugetlb_vm_op_open() argument
2963 struct resv_map *resv = vma_resv_map(vma); hugetlb_vm_op_open()
2973 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_vm_op_open()
2977 static void hugetlb_vm_op_close(struct vm_area_struct *vma) hugetlb_vm_op_close() argument
2979 struct hstate *h = hstate_vma(vma); hugetlb_vm_op_close()
2980 struct resv_map *resv = vma_resv_map(vma); hugetlb_vm_op_close()
2981 struct hugepage_subpool *spool = subpool_vma(vma); hugetlb_vm_op_close()
2985 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_vm_op_close()
2988 start = vma_hugecache_offset(h, vma, vma->vm_start); hugetlb_vm_op_close()
2989 end = vma_hugecache_offset(h, vma, vma->vm_end); hugetlb_vm_op_close()
3011 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) hugetlb_vm_op_fault() argument
3023 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, make_huge_pte() argument
3030 vma->vm_page_prot))); make_huge_pte()
3033 vma->vm_page_prot)); make_huge_pte()
3037 entry = arch_make_huge_pte(entry, vma, page, writable); make_huge_pte()
3042 static void set_huge_ptep_writable(struct vm_area_struct *vma, set_huge_ptep_writable() argument
3048 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) set_huge_ptep_writable()
3049 update_mmu_cache(vma, address, ptep); set_huge_ptep_writable()
3079 struct vm_area_struct *vma) copy_hugetlb_page_range()
3085 struct hstate *h = hstate_vma(vma); copy_hugetlb_page_range()
3091 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; copy_hugetlb_page_range()
3093 mmun_start = vma->vm_start; copy_hugetlb_page_range()
3094 mmun_end = vma->vm_end; copy_hugetlb_page_range()
3098 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { copy_hugetlb_page_range()
3156 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, __unmap_hugepage_range() argument
3161 struct mm_struct *mm = vma->vm_mm; __unmap_hugepage_range()
3167 struct hstate *h = hstate_vma(vma); __unmap_hugepage_range()
3172 WARN_ON(!is_vm_hugetlb_page(vma)); __unmap_hugepage_range()
3176 tlb_start_vma(tlb, vma); __unmap_hugepage_range()
3217 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); __unmap_hugepage_range()
3253 tlb_end_vma(tlb, vma); __unmap_hugepage_range()
3257 struct vm_area_struct *vma, unsigned long start, __unmap_hugepage_range_final()
3260 __unmap_hugepage_range(tlb, vma, start, end, ref_page); __unmap_hugepage_range_final()
3264 * test will fail on a vma being torn down, and not grab a page table __unmap_hugepage_range_final()
3272 vma->vm_flags &= ~VM_MAYSHARE; __unmap_hugepage_range_final()
3275 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unmap_hugepage_range() argument
3281 mm = vma->vm_mm; unmap_hugepage_range()
3284 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); unmap_hugepage_range()
3294 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, unmap_ref_private() argument
3297 struct hstate *h = hstate_vma(vma); unmap_ref_private()
3307 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + unmap_ref_private()
3308 vma->vm_pgoff; unmap_ref_private()
3309 mapping = file_inode(vma->vm_file)->i_mapping; unmap_ref_private()
3319 if (iter_vma == vma) unmap_ref_private()
3350 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_cow() argument
3354 struct hstate *h = hstate_vma(vma); hugetlb_cow()
3366 page_move_anon_rmap(old_page, vma, address); hugetlb_cow()
3367 set_huge_ptep_writable(vma, address, ptep); hugetlb_cow()
3380 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && hugetlb_cow()
3391 new_page = alloc_huge_page(vma, address, outside_reserve); hugetlb_cow()
3404 unmap_ref_private(mm, vma, old_page, address); hugetlb_cow()
3427 if (unlikely(anon_vma_prepare(vma))) { hugetlb_cow()
3432 copy_user_huge_page(new_page, old_page, address, vma, hugetlb_cow()
3451 huge_ptep_clear_flush(vma, address, ptep); hugetlb_cow()
3454 make_huge_pte(vma, new_page, 1)); hugetlb_cow()
3456 hugepage_add_new_anon_rmap(new_page, vma, address); hugetlb_cow()
3473 struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_page()
3478 mapping = vma->vm_file->f_mapping; hugetlbfs_pagecache_page()
3479 idx = vma_hugecache_offset(h, vma, address); hugetlbfs_pagecache_page()
3489 struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_present()
3495 mapping = vma->vm_file->f_mapping; hugetlbfs_pagecache_present()
3496 idx = vma_hugecache_offset(h, vma, address); hugetlbfs_pagecache_present()
3521 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_no_page() argument
3525 struct hstate *h = hstate_vma(vma); hugetlb_no_page()
3538 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { hugetlb_no_page()
3554 page = alloc_huge_page(vma, address, 0); hugetlb_no_page()
3567 if (vma->vm_flags & VM_MAYSHARE) { hugetlb_no_page()
3577 if (unlikely(anon_vma_prepare(vma))) { hugetlb_no_page()
3602 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { hugetlb_no_page()
3603 if (vma_needs_reservation(h, vma, address) < 0) { hugetlb_no_page()
3608 vma_end_reservation(h, vma, address); hugetlb_no_page()
3623 hugepage_add_new_anon_rmap(page, vma, address); hugetlb_no_page()
3626 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) hugetlb_no_page()
3627 && (vma->vm_flags & VM_SHARED))); hugetlb_no_page()
3631 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { hugetlb_no_page()
3633 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); hugetlb_no_page()
3651 struct vm_area_struct *vma, hugetlb_fault_mutex_hash()
3658 if (vma->vm_flags & VM_SHARED) { hugetlb_fault_mutex_hash()
3676 struct vm_area_struct *vma, hugetlb_fault_mutex_hash()
3684 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_fault() argument
3694 struct hstate *h = hstate_vma(vma); hugetlb_fault()
3704 migration_entry_wait_huge(vma, mm, ptep); hugetlb_fault()
3715 mapping = vma->vm_file->f_mapping; hugetlb_fault()
3716 idx = vma_hugecache_offset(h, vma, address); hugetlb_fault()
3723 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); hugetlb_fault()
3728 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); hugetlb_fault()
3753 if (vma_needs_reservation(h, vma, address) < 0) { hugetlb_fault()
3758 vma_end_reservation(h, vma, address); hugetlb_fault()
3760 if (!(vma->vm_flags & VM_MAYSHARE)) hugetlb_fault()
3762 vma, address); hugetlb_fault()
3787 ret = hugetlb_cow(mm, vma, address, ptep, entry, hugetlb_fault()
3794 if (huge_ptep_set_access_flags(vma, address, ptep, entry, hugetlb_fault()
3796 update_mmu_cache(vma, address, ptep); hugetlb_fault()
3822 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, follow_hugetlb_page() argument
3830 struct hstate *h = hstate_vma(vma); follow_hugetlb_page()
3832 while (vaddr < vma->vm_end && remainder) { follow_hugetlb_page()
3867 !hugetlbfs_pagecache_present(h, vma, vaddr)) { follow_hugetlb_page()
3891 ret = hugetlb_fault(mm, vma, vaddr, follow_hugetlb_page()
3909 vmas[i] = vma; follow_hugetlb_page()
3915 if (vaddr < vma->vm_end && remainder && follow_hugetlb_page()
3931 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, hugetlb_change_protection() argument
3934 struct mm_struct *mm = vma->vm_mm; hugetlb_change_protection()
3938 struct hstate *h = hstate_vma(vma); hugetlb_change_protection()
3942 flush_cache_range(vma, address, end); hugetlb_change_protection()
3945 i_mmap_lock_write(vma->vm_file->f_mapping); hugetlb_change_protection()
3979 pte = arch_make_huge_pte(pte, vma, NULL, 0); hugetlb_change_protection()
3991 flush_tlb_range(vma, start, end); hugetlb_change_protection()
3993 i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_change_protection()
4001 struct vm_area_struct *vma, hugetlb_reserve_pages()
4022 * called to make the mapping read-write. Assume !vma is a shm mapping hugetlb_reserve_pages()
4024 if (!vma || vma->vm_flags & VM_MAYSHARE) { hugetlb_reserve_pages()
4036 set_vma_resv_map(vma, resv_map); hugetlb_reserve_pages()
4037 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); hugetlb_reserve_pages()
4078 if (!vma || vma->vm_flags & VM_MAYSHARE) { hugetlb_reserve_pages()
4098 if (!vma || vma->vm_flags & VM_MAYSHARE) hugetlb_reserve_pages()
4100 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_reserve_pages()
4141 struct vm_area_struct *vma, page_table_shareable()
4150 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; page_table_shareable()
4165 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) vma_shareable() argument
4173 if (vma->vm_flags & VM_MAYSHARE && vma_shareable()
4174 vma->vm_start <= base && end <= vma->vm_end) vma_shareable()
4190 struct vm_area_struct *vma = find_vma(mm, addr); huge_pmd_share() local
4191 struct address_space *mapping = vma->vm_file->f_mapping; huge_pmd_share()
4192 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + huge_pmd_share()
4193 vma->vm_pgoff; huge_pmd_share()
4200 if (!vma_shareable(vma, addr)) huge_pmd_share()
4205 if (svma == vma) huge_pmd_share()
4208 saddr = page_table_shareable(svma, vma, addr, idx); huge_pmd_share()
4222 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); huge_pmd_share()
616 vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) vma_hugecache_offset() argument
875 dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) dequeue_huge_page_vma() argument
1455 __hugetlb_alloc_buddy_huge_page(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, int nid) __hugetlb_alloc_buddy_huge_page() argument
1520 __alloc_buddy_huge_page(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, int nid) __alloc_buddy_huge_page() argument
1612 __alloc_buddy_huge_page_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) __alloc_buddy_huge_page_with_mpol() argument
1784 __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, enum vma_resv_mode mode) __vma_reservation_common() argument
1818 vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) vma_needs_reservation() argument
1824 vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) vma_commit_reservation() argument
1830 vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) vma_end_reservation() argument
3078 copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) copy_hugetlb_page_range() argument
3256 __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) __unmap_hugepage_range_final() argument
3472 hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_page() argument
3488 hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_present() argument
3650 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) hugetlb_fault_mutex_hash() argument
3675 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) hugetlb_fault_mutex_hash() argument
3999 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) hugetlb_reserve_pages() argument
4140 page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) page_table_shareable() argument
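The hugetlb.c hits above (lines 617-620) show how vma_hugecache_offset() turns a faulting address into a huge-page index in the backing file: the offset inside the vma measured in huge pages, plus the vma's file offset converted from base pages to huge pages. A minimal user-space sketch of that arithmetic, using made-up vma values and assuming 2 MiB huge pages (huge_page_shift = 21):

#include <stdio.h>

int main(void)
{
        unsigned long vm_start  = 0x7f0000000000UL; /* vma->vm_start (assumed) */
        unsigned long vm_pgoff  = 0x200;            /* vma->vm_pgoff in 4 KiB pages (assumed) */
        unsigned long address   = 0x7f0000600000UL; /* faulting address (assumed) */
        unsigned int huge_shift = 21;               /* huge_page_shift(h) for 2 MiB pages */
        unsigned int huge_order = huge_shift - 12;  /* huge_page_order(h) */

        /* Same arithmetic as vma_hugecache_offset() in the listing above. */
        unsigned long idx = ((address - vm_start) >> huge_shift)
                          + (vm_pgoff >> huge_order);

        printf("huge page index = %lu\n", idx);     /* 3 + 1 = 4 with these values */
        return 0;
}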
H A Dframe_vector.c23 * depends on the type of the vma underlying the virtual address). If @start
24 * belongs to a normal vma, the function grabs reference to each of the pages
25 * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't
40 struct vm_area_struct *vma; get_vaddr_frames() local
53 vma = find_vma_intersection(mm, start, start + 1); get_vaddr_frames()
54 if (!vma) { get_vaddr_frames()
58 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { get_vaddr_frames()
71 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { get_vaddr_frames()
72 err = follow_pfn(vma, start, &nums[ret]); get_vaddr_frames()
85 if (ret >= nr_frames || start < vma->vm_end) get_vaddr_frames()
87 vma = find_vma_intersection(mm, start, start + 1); get_vaddr_frames()
88 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); get_vaddr_frames()
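get_vaddr_frames() (frame_vector.c, lines 23-88 above) treats a VM_IO | VM_PFNMAP vma differently from a normal one: instead of pinning pages it walks the mapping with follow_pfn(). A rough sketch of that walk as a hypothetical helper (collect_pfns() is not a kernel function), assuming the caller holds current->mm->mmap_sem for read:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Collect up to max_pfns PFNs backing [start, ...) of a VM_IO | VM_PFNMAP vma. */
static int collect_pfns(unsigned long start, unsigned long *pfns, int max_pfns)
{
        struct vm_area_struct *vma = find_vma(current->mm, start);
        int n = 0;

        if (!vma || !(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return -EINVAL;

        while (n < max_pfns && start + PAGE_SIZE <= vma->vm_end) {
                if (follow_pfn(vma, start, &pfns[n]))
                        break;          /* hole in the mapping */
                start += PAGE_SIZE;
                n++;
        }
        return n;
}

Unlike the real get_vaddr_frames(), this sketch does not move on to the next vma when the requested range spans several VM_PFNMAP mappings.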
/linux-4.4.14/arch/mips/include/asm/
H A Dtlb.h5 * MIPS doesn't need any special per-pte or per-vma handling, except
8 #define tlb_start_vma(tlb, vma) \
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
13 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h11 * - flush_tlb_page(vma, vmaddr) flushes one page
12 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
29 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
39 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
42 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
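The tlbflush.h hits above document the two per-vma flush entry points every port provides: flush_tlb_page(vma, vmaddr) for one page and flush_tlb_range(vma, start, end) for a range. A hedged sketch of the usual calling pattern, write-protecting a single user PTE and then dropping the stale TLB entry (illustrative only, not a specific kernel path; the caller is assumed to hold the page-table lock):

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static void make_page_readonly(struct vm_area_struct *vma, unsigned long addr,
                               pte_t *ptep)
{
        pte_t entry = ptep_get_and_clear(vma->vm_mm, addr, ptep);

        entry = pte_wrprotect(entry);
        set_pte_at(vma->vm_mm, addr, ptep, entry);

        /* The old, writable translation may still sit in the TLB. */
        flush_tlb_page(vma, addr);
}

Batched updates would issue a single flush_tlb_range(vma, start, end) instead, which is what hugetlb_change_protection() in the hugetlb.c hits does.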
H A Dhugetlb.h67 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
70 flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); huge_ptep_clear_flush()
90 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
98 set_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
103 flush_tlb_range(vma, addr, addr + HPAGE_SIZE); huge_ptep_set_access_flags()
/linux-4.4.14/arch/m68k/include/asm/
H A Dfb.h11 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
14 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; fb_pgprotect()
17 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
21 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; fb_pgprotect()
23 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; fb_pgprotect()
25 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; fb_pgprotect()
H A Dtlb.h6 * per-vma handling..
8 #define tlb_start_vma(tlb, vma) do { } while (0)
9 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush_no.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr) do { } while (0)
21 #define flush_icache_page(vma,pg) do { } while (0)
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
28 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlbflush.h84 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
86 if (vma->vm_mm == current->active_mm) { flush_tlb_page()
94 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
97 if (vma->vm_mm == current->active_mm) flush_tlb_range()
170 static inline void flush_tlb_page (struct vm_area_struct *vma, flush_tlb_page() argument
177 sun3_put_context(vma->vm_mm->context); flush_tlb_page()
191 static inline void flush_tlb_range (struct vm_area_struct *vma, flush_tlb_range() argument
194 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
260 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
H A Dpage_no.h18 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
/linux-4.4.14/arch/m32r/include/asm/
H A Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
21 #define flush_icache_page(vma,pg) _flush_cache_copyback_all()
22 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all()
27 #define flush_icache_page(vma,pg) smp_flush_cache_all()
28 #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all()
35 #define flush_cache_range(vma, start, end) do { } while (0)
36 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
42 #define flush_icache_page(vma,pg) _flush_cache_all()
43 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all()
49 #define flush_cache_range(vma, start, end) do { } while (0)
50 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
56 #define flush_icache_page(vma,pg) do { } while (0)
57 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
64 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
67 flush_icache_user_range(vma, page, vaddr, len); \
69 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlb.h6 * per-vma handling..
8 #define tlb_start_vma(tlb, vma) do { } while (0)
9 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h12 * - flush_tlb_page(vma, vmaddr) flushes one page
13 * - flush_tlb_range(vma, start, end) flushes a range of pages
27 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
28 #define flush_tlb_range(vma, start, end) \
29 local_flush_tlb_range(vma, start, end)
34 #define flush_tlb_page(vma, vmaddr) do { } while (0)
35 #define flush_tlb_range(vma, start, end) do { } while (0)
46 #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
47 #define flush_tlb_range(vma, start, end) \
48 smp_flush_tlb_range(vma, start, end)
/linux-4.4.14/arch/x86/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
13 prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; fb_pgprotect()
15 pgprot_val(vma->vm_page_prot) = fb_pgprotect()
H A Dtlb.h4 #define tlb_start_vma(tlb, vma) do { } while (0)
5 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dhugetlb.h50 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
53 ptep_clear_flush(vma, addr, ptep); huge_ptep_clear_flush()
72 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
76 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
/linux-4.4.14/arch/score/include/asm/
H A Dtlb.h5 * SCORE doesn't need any special per-pte or per-vma handling, except
8 #define tlb_start_vma(tlb, vma) do {} while (0)
9 #define tlb_end_vma(tlb, vma) do {} while (0)
H A Dcacheflush.h9 extern void flush_cache_range(struct vm_area_struct *vma,
11 extern void flush_cache_page(struct vm_area_struct *vma,
28 static inline void flush_icache_page(struct vm_area_struct *vma, flush_icache_page() argument
31 if (vma->vm_flags & VM_EXEC) { flush_icache_page()
38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
44 if ((vma->vm_flags & VM_EXEC)) \
45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
H A Dtlbflush.h11 * - flush_tlb_page(vma, vmaddr) flushes one page
12 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
27 #define flush_tlb_range(vma, vmaddr, end) \
28 local_flush_tlb_range(vma, vmaddr, end)
31 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
/linux-4.4.14/arch/sh/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h9 * - flush_tlb_page(vma, vmaddr) flushes one page
10 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 extern void local_flush_tlb_range(struct vm_area_struct *vma,
18 extern void local_flush_tlb_page(struct vm_area_struct *vma,
30 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
40 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
43 #define flush_tlb_range(vma, start, end) \
44 local_flush_tlb_range(vma, start, end)
H A Dtlb.h69 * In the case of tlb vma handling, we can optimise these away in the
74 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_start_vma() argument
77 flush_cache_range(vma, vma->vm_start, vma->vm_end); tlb_start_vma()
81 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_end_vma() argument
84 flush_tlb_range(vma, tlb->start, tlb->end); tlb_end_vma()
122 static inline void tlb_wire_entry(struct vm_area_struct *vma , tlb_wire_entry()
136 #define tlb_start_vma(tlb, vma) do { } while (0)
137 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush.h15 * - flush_cache_range(vma, start, end) flushes a range of pages
19 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
41 extern void flush_cache_page(struct vm_area_struct *vma, cache_noop()
43 extern void flush_cache_range(struct vm_area_struct *vma, cache_noop()
48 extern void flush_icache_page(struct vm_area_struct *vma, cache_noop()
53 struct vm_area_struct *vma; cache_noop() member in struct:flusher_data
60 static inline void flush_anon_page(struct vm_area_struct *vma, flush_anon_page() argument
81 extern void copy_to_user_page(struct vm_area_struct *vma,
85 extern void copy_from_user_page(struct vm_area_struct *vma,
/linux-4.4.14/arch/cris/include/asm/
H A Dtlb.h10 * per-vma handling..
12 #define tlb_start_vma(tlb, vma) do { } while (0)
13 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
20 #define flush_icache_page(vma,pg) do { } while (0)
21 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
25 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlbflush.h15 * - flush_tlb_page(vma, vmaddr) flushes one page
22 extern void __flush_tlb_page(struct vm_area_struct *vma,
29 static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) flush_tlb_range() argument
31 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
/linux-4.4.14/arch/arm/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlb.h72 struct vm_area_struct *vma; member in struct:mmu_gather
89 * tlb->vma will be non-NULL.
92 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
95 * tlb->vma will be NULL.
99 if (tlb->fullmm || !tlb->vma) tlb_flush()
102 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); tlb_flush()
157 tlb->vma = NULL; tlb_gather_mmu()
190 * In the case of tlb vma handling, we can optimise these away in the
195 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_start_vma() argument
198 flush_cache_range(vma, vma->vm_start, vma->vm_end); tlb_start_vma()
199 tlb->vma = vma; tlb_start_vma()
206 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_end_vma() argument
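The ARM tlb.h comments above (lines 89-95) explain that tlb_start_vma() records tlb->vma so the deferred tlb_flush() can later call flush_tlb_range() on the accumulated range. A skeleton of the mmu_gather sequence that unmap paths wrap around page-table teardown, with the middle step elided because the PTE walk itself is path-specific:

#include <linux/mm.h>
#include <asm/tlb.h>

static void teardown_vma_range(struct vm_area_struct *vma,
                               unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
        tlb_start_vma(&tlb, vma);   /* on ARM: flush_cache_range() + remember tlb->vma */

        /* ... walk PTEs in [start, end), handing pages to tlb_remove_page() ... */

        tlb_end_vma(&tlb, vma);     /* may issue flush_tlb_range() for this vma */
        tlb_finish_mmu(&tlb, start, end);
}

__unmap_hugepage_range() in the hugetlb.c hits follows the same tlb_start_vma()/tlb_end_vma() bracketing.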
/linux-4.4.14/drivers/staging/rdma/ehca/
H A Dehca_uverbs.c71 static void ehca_mm_open(struct vm_area_struct *vma) ehca_mm_open() argument
73 u32 *count = (u32 *)vma->vm_private_data; ehca_mm_open()
75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", ehca_mm_open()
76 vma->vm_start, vma->vm_end); ehca_mm_open()
82 vma->vm_start, vma->vm_end); ehca_mm_open()
84 vma->vm_start, vma->vm_end, *count); ehca_mm_open()
87 static void ehca_mm_close(struct vm_area_struct *vma) ehca_mm_close() argument
89 u32 *count = (u32 *)vma->vm_private_data; ehca_mm_close()
91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", ehca_mm_close()
92 vma->vm_start, vma->vm_end); ehca_mm_close()
97 vma->vm_start, vma->vm_end, *count); ehca_mm_close()
105 static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, ehca_mmap_fw() argument
111 vsize = vma->vm_end - vma->vm_start; ehca_mmap_fw()
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start); ehca_mmap_fw()
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ehca_mmap_fw()
121 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, ehca_mmap_fw()
122 vma->vm_page_prot); ehca_mmap_fw()
128 vma->vm_private_data = mm_count; ehca_mmap_fw()
130 vma->vm_ops = &vm_ops; ehca_mmap_fw()
135 static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue, ehca_mmap_queue() argument
142 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; ehca_mmap_queue()
143 start = vma->vm_start; ehca_mmap_queue()
147 ret = vm_insert_page(vma, start, page); ehca_mmap_queue()
154 vma->vm_private_data = mm_count; ehca_mmap_queue()
156 vma->vm_ops = &vm_ops; ehca_mmap_queue()
161 static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq, ehca_mmap_cq() argument
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa); ehca_mmap_cq()
180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue); ehca_mmap_cq()
198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, ehca_mmap_qp() argument
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); ehca_mmap_qp()
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, ehca_mmap_qp()
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, ehca_mmap_qp()
248 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ehca_mmap() argument
250 u64 fileoffset = vma->vm_pgoff; ehca_mmap()
272 ret = ehca_mmap_cq(vma, cq, rsrc_type); ehca_mmap()
294 ret = ehca_mmap_qp(vma, qp, rsrc_type); ehca_mmap()
/linux-4.4.14/include/asm-generic/
H A Dcacheflush.h14 #define flush_cache_range(vma, start, end) do { } while (0)
15 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
21 #define flush_icache_page(vma,pg) do { } while (0)
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
29 flush_icache_user_range(vma, page, vaddr, len); \
31 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dmm_hooks.h19 struct vm_area_struct *vma, arch_unmap()
25 struct vm_area_struct *vma) arch_bprm_mm_init()
18 arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start, unsigned long end) arch_unmap() argument
24 arch_bprm_mm_init(struct mm_struct *mm, struct vm_area_struct *vma) arch_bprm_mm_init() argument
H A Ddma-coherent.h13 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
29 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
/linux-4.4.14/drivers/xen/xenfs/
H A Dxenstored.c33 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) xsd_kva_mmap() argument
35 size_t size = vma->vm_end - vma->vm_start; xsd_kva_mmap()
37 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) xsd_kva_mmap()
40 if (remap_pfn_range(vma, vma->vm_start, xsd_kva_mmap()
42 size, vma->vm_page_prot)) xsd_kva_mmap()
/linux-4.4.14/arch/metag/mm/
H A Dhugetlbpage.c35 struct vm_area_struct *vma; prepare_hugepage_range() local
44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); prepare_hugepage_range()
45 if (vma && !(vma->vm_flags & MAP_HUGETLB)) prepare_hugepage_range()
48 vma = find_vma(mm, addr); prepare_hugepage_range()
49 if (vma) { prepare_hugepage_range()
50 if (addr + len > vma->vm_start) prepare_hugepage_range()
52 if (!(vma->vm_flags & MAP_HUGETLB) && prepare_hugepage_range()
53 (ALIGN_HUGEPT(addr + len) > vma->vm_start)) prepare_hugepage_range()
111 * Look for an unmapped area starting after another hugetlb vma.
126 struct vm_area_struct *vma; hugetlb_get_unmapped_area_existing() local
140 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { hugetlb_get_unmapped_area_existing()
141 if ((!vma && !after_huge) || TASK_SIZE - len < addr) { hugetlb_get_unmapped_area_existing()
153 if (vma && vma->vm_end <= addr) hugetlb_get_unmapped_area_existing()
155 /* space before the next vma? */ hugetlb_get_unmapped_area_existing()
156 if (after_huge && (!vma || ALIGN_HUGEPT(addr + len) hugetlb_get_unmapped_area_existing()
157 <= vma->vm_start)) { hugetlb_get_unmapped_area_existing()
165 if (vma->vm_flags & MAP_HUGETLB) { hugetlb_get_unmapped_area_existing()
166 /* space after a huge vma in 2nd level page table? */ hugetlb_get_unmapped_area_existing()
167 if (vma->vm_end & HUGEPT_MASK) { hugetlb_get_unmapped_area_existing()
170 addr = vma->vm_end; hugetlb_get_unmapped_area_existing()
175 addr = ALIGN_HUGEPT(vma->vm_end); hugetlb_get_unmapped_area_existing()
219 * Look for an existing hugetlb vma with space after it (this is to to hugetlb_get_unmapped_area()
H A Dfault.c53 struct vm_area_struct *vma, *prev_vma; do_page_fault() local
116 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault()
118 if (!vma || address < vma->vm_start) do_page_fault()
123 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
127 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) do_page_fault()
136 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
173 vma = prev_vma; do_page_fault()
174 if (vma && (expand_stack(vma, address) == 0)) do_page_fault()
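The metag fault.c hits above show the canonical shape of an architecture page-fault handler: look the address up with find_vma()/find_vma_prev(), check the vma's access bits against the fault type, and hand the rest to handle_mm_fault(). A condensed sketch of that flow (error reporting, retry handling and stack expansion are left out; handle_mm_fault() is shown with its 4.4-era signature, which still takes mm):

#include <linux/mm.h>
#include <linux/sched.h>

static int sketch_user_fault(struct mm_struct *mm, unsigned long address,
                             bool is_write)
{
        struct vm_area_struct *vma;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int fault;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma || address < vma->vm_start)
                goto bad;               /* no mapping (real code may try expand_stack()) */

        if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad;       /* write fault on a non-writable vma */
                flags |= FAULT_FLAG_WRITE;
        } else if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                goto bad;

        fault = handle_mm_fault(mm, vma, address, flags);
        up_read(&mm->mmap_sem);
        return fault;
bad:
        up_read(&mm->mmap_sem);
        return VM_FAULT_SIGSEGV;
}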
/linux-4.4.14/drivers/media/v4l2-core/
H A Dvideobuf2-memops.c84 * vb2_common_vm_open() - increase refcount of the vma
85 * @vma: virtual memory region for the mapping
87 * This function adds another user to the provided vma. It expects
88 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
90 static void vb2_common_vm_open(struct vm_area_struct *vma) vb2_common_vm_open() argument
92 struct vb2_vmarea_handler *h = vma->vm_private_data; vb2_common_vm_open()
94 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", vb2_common_vm_open()
95 __func__, h, atomic_read(h->refcount), vma->vm_start, vb2_common_vm_open()
96 vma->vm_end); vb2_common_vm_open()
102 * vb2_common_vm_close() - decrease refcount of the vma
103 * @vma: virtual memory region for the mapping
105 * This function releases the user from the provided vma. It expects
106 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
108 static void vb2_common_vm_close(struct vm_area_struct *vma) vb2_common_vm_close() argument
110 struct vb2_vmarea_handler *h = vma->vm_private_data; vb2_common_vm_close()
112 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", vb2_common_vm_close()
113 __func__, h, atomic_read(h->refcount), vma->vm_start, vb2_common_vm_close()
114 vma->vm_end); vb2_common_vm_close()
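vb2_common_vm_open()/vb2_common_vm_close() above keep a mapped buffer alive for as long as any vma refers to it, by storing a refcounted handler in vma->vm_private_data. A stripped-down sketch of the same pattern for a hypothetical driver buffer (struct my_buf and my_vm_ops are assumptions, not v4l2 symbols):

#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_buf {
        atomic_t refcount;
        /* ... backing storage ... */
};

static void my_vm_open(struct vm_area_struct *vma)
{
        struct my_buf *buf = vma->vm_private_data;

        atomic_inc(&buf->refcount);     /* fork()/mremap() duplicate the vma */
}

static void my_vm_close(struct vm_area_struct *vma)
{
        struct my_buf *buf = vma->vm_private_data;

        if (atomic_dec_and_test(&buf->refcount))
                kfree(buf);             /* last mapping is gone */
}

static const struct vm_operations_struct my_vm_ops = {
        .open  = my_vm_open,
        .close = my_vm_close,
};

The driver's .mmap handler would set vma->vm_private_data and vma->vm_ops and then take the first reference itself, which is what the __videobuf_mmap_mapper() hits below do by calling videobuf_vm_open(vma) at the end.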
H A Dvideobuf-vmalloc.c54 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
56 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
58 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, videobuf_vm_open()
59 map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
64 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
66 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
70 dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, videobuf_vm_close()
71 map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
209 The code below won't work, since mem->vma = NULL __videobuf_iolock()
212 rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); __videobuf_iolock()
234 struct vm_area_struct *vma) __videobuf_mmap_mapper()
250 buf->baddr = vma->vm_start; __videobuf_mmap_mapper()
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); __videobuf_mmap_mapper()
265 retval = remap_vmalloc_range(vma, mem->vaddr, 0); __videobuf_mmap_mapper()
272 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
273 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; __videobuf_mmap_mapper()
274 vma->vm_private_data = map; __videobuf_mmap_mapper()
277 map, q, vma->vm_start, vma->vm_end, __videobuf_mmap_mapper()
279 vma->vm_pgoff, buf->i); __videobuf_mmap_mapper()
281 videobuf_vm_open(vma); __videobuf_mmap_mapper()
232 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
H A Dvideobuf-dma-contig.c66 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
68 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", videobuf_vm_open()
71 map, map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
76 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
78 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", videobuf_vm_close()
83 map, map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
164 struct vm_area_struct *vma; videobuf_dma_contig_user_get() local
176 vma = find_vma(mm, vb->baddr); videobuf_dma_contig_user_get()
177 if (!vma) videobuf_dma_contig_user_get()
180 if ((vb->baddr + mem->size) > vma->vm_end) videobuf_dma_contig_user_get()
188 ret = follow_pfn(vma, user_address, &this_pfn); videobuf_dma_contig_user_get()
278 struct vm_area_struct *vma) __videobuf_mmap_mapper()
295 buf->baddr = vma->vm_start; __videobuf_mmap_mapper()
306 size = vma->vm_end - vma->vm_start; __videobuf_mmap_mapper()
307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); __videobuf_mmap_mapper()
315 vma->vm_pgoff = 0; __videobuf_mmap_mapper()
317 retval = vm_iomap_memory(vma, mem->dma_handle, size); __videobuf_mmap_mapper()
326 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
327 vma->vm_flags |= VM_DONTEXPAND; __videobuf_mmap_mapper()
328 vma->vm_private_data = map; __videobuf_mmap_mapper()
331 map, q, vma->vm_start, vma->vm_end, __videobuf_mmap_mapper()
332 (long int)buf->bsize, vma->vm_pgoff, buf->i); __videobuf_mmap_mapper()
334 videobuf_vm_open(vma); __videobuf_mmap_mapper()
276 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
H A Dvideobuf-dma-sg.c384 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
386 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
388 dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, videobuf_vm_open()
389 map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
394 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
396 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
401 dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, videobuf_vm_close()
402 map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
435 static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) videobuf_vm_fault() argument
439 dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", videobuf_vm_fault()
441 vma->vm_start, vma->vm_end); videobuf_vm_fault()
589 struct vm_area_struct *vma) __videobuf_mmap_mapper()
612 (vma->vm_pgoff << PAGE_SHIFT)); __videobuf_mmap_mapper()
629 q->bufs[i]->baddr = vma->vm_start + size; __videobuf_mmap_mapper()
635 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
636 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; __videobuf_mmap_mapper()
637 vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */ __videobuf_mmap_mapper()
638 vma->vm_private_data = map; __videobuf_mmap_mapper()
640 map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last); __videobuf_mmap_mapper()
587 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
/linux-4.4.14/arch/nios2/include/asm/
H A Dtlb.h19 * NiosII doesn't need any special per-pte or per-vma handling, except
22 #define tlb_start_vma(tlb, vma) \
25 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
28 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush.h26 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
39 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
42 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
H A Dtlbflush.h29 * - flush_tlb_page(vma, vmaddr) flushes one page
30 * - flush_tlb_range(vma, start, end) flushes a range of pages
35 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
40 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
/linux-4.4.14/arch/nios2/kernel/
H A Dsys_nios2.c24 struct vm_area_struct *vma; sys_cacheflush() local
41 vma = find_vma(current->mm, addr); sys_cacheflush()
42 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) sys_cacheflush()
45 flush_cache_range(vma, addr, addr + len); sys_cacheflush()
/linux-4.4.14/arch/frv/include/asm/
H A Dtlb.h13 * we don't need any special per-pte or per-vma handling...
15 #define tlb_start_vma(tlb, vma) do { } while (0)
16 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h42 #define flush_tlb_range(vma,start,end) \
45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
49 #define flush_tlb_page(vma,addr) \
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
66 #define flush_tlb_page(vma,addr) BUG()
H A Dcacheflush.h25 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); flush_icache_page()
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
99 flush_icache_user_range((vma), (page), (vaddr), (len)); \
102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/um/include/asm/
H A Dtlbflush.h17 * - flush_tlb_page(vma, vmaddr) flushes one page
19 * - flush_tlb_range(vma, start, end) flushes a range of pages
24 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
26 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
/linux-4.4.14/arch/arc/kernel/
H A Darc_hostlink.c21 static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma) arc_hl_mmap() argument
23 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); arc_hl_mmap()
25 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, arc_hl_mmap()
26 vma->vm_end - vma->vm_start, arc_hl_mmap()
27 vma->vm_page_prot)) { arc_hl_mmap()
/linux-4.4.14/arch/alpha/include/asm/
H A Dcacheflush.h10 #define flush_cache_range(vma, start, end) do { } while (0)
11 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
50 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
53 if (vma->vm_flags & VM_EXEC) { flush_icache_user_range()
54 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range()
62 extern void flush_icache_user_range(struct vm_area_struct *vma,
67 #define flush_icache_page(vma, page) \
68 flush_icache_user_range((vma), (page), 0, 0)
70 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
72 flush_icache_user_range(vma, page, vaddr, len); \
74 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlb.h4 #define tlb_start_vma(tlb, vma) do { } while (0)
5 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h39 struct vm_area_struct *vma, ev4_flush_tlb_current_page()
43 if (vma->vm_flags & VM_EXEC) { ev4_flush_tlb_current_page()
52 struct vm_area_struct *vma, ev5_flush_tlb_current_page()
55 if (vma->vm_flags & VM_EXEC) ev5_flush_tlb_current_page()
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
119 struct mm_struct *mm = vma->vm_mm; flush_tlb_page()
122 flush_tlb_current_page(mm, vma, addr); flush_tlb_page()
130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, flush_tlb_range() argument
133 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
38 ev4_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) ev4_flush_tlb_current_page() argument
51 ev5_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) ev5_flush_tlb_current_page() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dbase.c30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) nvkm_vm_map_at() argument
32 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_at()
35 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_at()
36 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_at()
37 u32 bits = vma->node->type - 12; nvkm_vm_map_at()
56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); nvkm_vm_map_at()
66 delta += (u64)len << vma->node->type; nvkm_vm_map_at()
74 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, nvkm_vm_map_sg_table() argument
77 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg_table()
79 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg_table()
80 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg_table()
81 u32 bits = vma->node->type - 12; nvkm_vm_map_sg_table()
82 u32 num = length >> vma->node->type; nvkm_vm_map_sg_table()
103 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
118 mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
132 nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length, nvkm_vm_map_sg() argument
135 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg()
138 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_map_sg()
139 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg()
140 u32 bits = vma->node->type - 12; nvkm_vm_map_sg()
141 u32 num = length >> vma->node->type; nvkm_vm_map_sg()
155 mmu->func->map_sg(vma, pgt, mem, pte, len, list); nvkm_vm_map_sg()
170 nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node) nvkm_vm_map() argument
173 nvkm_vm_map_sg_table(vma, 0, node->size << 12, node); nvkm_vm_map()
176 nvkm_vm_map_sg(vma, 0, node->size << 12, node); nvkm_vm_map()
178 nvkm_vm_map_at(vma, 0, node); nvkm_vm_map()
182 nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length) nvkm_vm_unmap_at() argument
184 struct nvkm_vm *vm = vma->vm; nvkm_vm_unmap_at()
186 int big = vma->node->type != mmu->func->spg_shift; nvkm_vm_unmap_at()
187 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_unmap_at()
188 u32 bits = vma->node->type - 12; nvkm_vm_unmap_at()
189 u32 num = length >> vma->node->type; nvkm_vm_unmap_at()
203 mmu->func->unmap(vma, pgt, pte, len); nvkm_vm_unmap_at()
217 nvkm_vm_unmap(struct nvkm_vma *vma) nvkm_vm_unmap() argument
219 nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); nvkm_vm_unmap()
275 struct nvkm_vma *vma) nvkm_vm_get()
285 &vma->node); nvkm_vm_get()
291 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_get()
292 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_get()
296 int big = (vma->node->type != mmu->func->spg_shift); nvkm_vm_get()
303 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); nvkm_vm_get()
307 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_get()
314 vma->vm = NULL; nvkm_vm_get()
315 nvkm_vm_ref(vm, &vma->vm, NULL); nvkm_vm_get()
316 vma->offset = (u64)vma->node->offset << 12; nvkm_vm_get()
317 vma->access = access; nvkm_vm_get()
322 nvkm_vm_put(struct nvkm_vma *vma) nvkm_vm_put() argument
328 if (unlikely(vma->node == NULL)) nvkm_vm_put()
330 vm = vma->vm; nvkm_vm_put()
333 fpde = (vma->node->offset >> mmu->func->pgt_bits); nvkm_vm_put()
334 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits; nvkm_vm_put()
337 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde); nvkm_vm_put()
338 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_put()
341 nvkm_vm_ref(NULL, &vma->vm, NULL); nvkm_vm_put()
274 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, struct nvkm_vma *vma) nvkm_vm_get() argument
/linux-4.4.14/drivers/xen/
H A Dprivcmd.c47 struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member in struct:mmap_gfn_state
206 struct vm_area_struct *vma = st->vma; mmap_gfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) mmap_gfn_range()
219 rc = xen_remap_domain_gfn_range(vma, mmap_gfn_range()
222 vma->vm_page_prot, mmap_gfn_range()
236 struct vm_area_struct *vma; privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; privcmd_ioctl_mmap()
270 state.va = vma->vm_start; privcmd_ioctl_mmap()
271 state.vma = vma; privcmd_ioctl_mmap()
291 struct vm_area_struct *vma; member in struct:mmap_batch_state
315 struct vm_area_struct *vma = st->vma; mmap_batch_fn() local
316 struct page **pages = vma->vm_private_data; mmap_batch_fn()
324 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr, mmap_batch_fn()
325 (int *)gfnp, st->vma->vm_page_prot, mmap_batch_fn()
392 * the vma with the page info to use later.
395 static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) alloc_empty_pages() argument
411 BUG_ON(vma->vm_private_data != NULL); alloc_empty_pages()
412 vma->vm_private_data = pages; alloc_empty_pages()
424 struct vm_area_struct *vma; privcmd_ioctl_mmap_batch() local
472 vma = find_vma(mm, m.addr); privcmd_ioctl_mmap_batch()
473 if (!vma || privcmd_ioctl_mmap_batch()
474 vma->vm_ops != &privcmd_vm_ops) { privcmd_ioctl_mmap_batch()
490 if (vma->vm_private_data == NULL) { privcmd_ioctl_mmap_batch()
491 if (m.addr != vma->vm_start || privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { privcmd_ioctl_mmap_batch()
497 ret = alloc_empty_pages(vma, nr_pages); privcmd_ioctl_mmap_batch()
501 vma->vm_private_data = PRIV_VMA_LOCKED; privcmd_ioctl_mmap_batch()
503 if (m.addr < vma->vm_start || privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { privcmd_ioctl_mmap_batch()
508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { privcmd_ioctl_mmap_batch()
515 state.vma = vma; privcmd_ioctl_mmap_batch()
582 static void privcmd_close(struct vm_area_struct *vma) privcmd_close() argument
584 struct page **pages = vma->vm_private_data; privcmd_close()
585 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; privcmd_close()
586 int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT; privcmd_close()
592 rc = xen_unmap_domain_gfn_range(vma, numgfns, pages); privcmd_close()
601 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) privcmd_fault() argument
603 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", privcmd_fault()
604 vma, vma->vm_start, vma->vm_end, privcmd_fault()
615 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) privcmd_mmap() argument
619 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | privcmd_mmap()
621 vma->vm_ops = &privcmd_vm_ops; privcmd_mmap()
622 vma->vm_private_data = NULL; privcmd_mmap()
639 struct vm_area_struct *vma, privcmd_vma_range_is_mapped()
643 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, privcmd_vma_range_is_mapped()
638 privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages) privcmd_vma_range_is_mapped() argument
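alloc_empty_pages() and privcmd_close() above show another common use of vma->vm_private_data: remembering the backing pages set up at mmap/ioctl time so the .close handler can release them together with the vma. A generic sketch of that idea (all names here are made up, and the real privcmd code additionally has to unmap foreign frames through Xen):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int my_attach_pages(struct vm_area_struct *vma, int numpgs)
{
        struct page **pages;
        int i;

        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        for (i = 0; i < numpgs; i++) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!pages[i])
                        goto err;
        }
        vma->vm_private_data = pages;   /* picked up again in .close */
        return 0;
err:
        while (i--)
                __free_page(pages[i]);
        kfree(pages);
        return -ENOMEM;
}

static void my_vma_close(struct vm_area_struct *vma)
{
        struct page **pages = vma->vm_private_data;
        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        int i;

        for (i = 0; i < numpgs; i++)
                __free_page(pages[i]);
        kfree(pages);
}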
H A Dgntdev.c84 struct vm_area_struct *vma; member in struct:grant_map
241 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; find_grant_ptes()
397 static void gntdev_vma_open(struct vm_area_struct *vma) gntdev_vma_open() argument
399 struct grant_map *map = vma->vm_private_data; gntdev_vma_open()
401 pr_debug("gntdev_vma_open %p\n", vma); gntdev_vma_open()
405 static void gntdev_vma_close(struct vm_area_struct *vma) gntdev_vma_close() argument
407 struct grant_map *map = vma->vm_private_data; gntdev_vma_close()
408 struct file *file = vma->vm_file; gntdev_vma_close()
411 pr_debug("gntdev_vma_close %p\n", vma); gntdev_vma_close()
414 * concurrently, so take priv->lock to ensure that the vma won't gntdev_vma_close()
418 * closing the vma, but it may still iterate the unmap_ops list. gntdev_vma_close()
421 map->vma = NULL; gntdev_vma_close()
424 vma->vm_private_data = NULL; gntdev_vma_close()
428 static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma, gntdev_vma_find_special_page() argument
431 struct grant_map *map = vma->vm_private_data; gntdev_vma_find_special_page()
450 if (!map->vma) unmap_if_in_range()
452 if (map->vma->vm_start >= end) unmap_if_in_range()
454 if (map->vma->vm_end <= start) unmap_if_in_range()
456 mstart = max(start, map->vma->vm_start); unmap_if_in_range()
457 mend = min(end, map->vma->vm_end); unmap_if_in_range()
460 map->vma->vm_start, map->vma->vm_end, unmap_if_in_range()
463 (mstart - map->vma->vm_start) >> PAGE_SHIFT, unmap_if_in_range()
501 if (!map->vma) mn_release()
505 map->vma->vm_start, map->vma->vm_end); mn_release()
510 if (!map->vma) mn_release()
514 map->vma->vm_start, map->vma->vm_end); mn_release()
656 struct vm_area_struct *vma; gntdev_ioctl_get_offset_for_vaddr() local
665 vma = find_vma(current->mm, op.vaddr); gntdev_ioctl_get_offset_for_vaddr()
666 if (!vma || vma->vm_ops != &gntdev_vmops) gntdev_ioctl_get_offset_for_vaddr()
669 map = vma->vm_private_data; gntdev_ioctl_get_offset_for_vaddr()
778 static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) gntdev_mmap() argument
781 int index = vma->vm_pgoff; gntdev_mmap()
782 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; gntdev_mmap()
786 if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) gntdev_mmap()
790 index, count, vma->vm_start, vma->vm_pgoff); gntdev_mmap()
796 if (use_ptemod && map->vma) gntdev_mmap()
798 if (use_ptemod && priv->mm != vma->vm_mm) { gntdev_mmap()
805 vma->vm_ops = &gntdev_vmops; gntdev_mmap()
807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; gntdev_mmap()
810 vma->vm_flags |= VM_DONTCOPY; gntdev_mmap()
812 vma->vm_private_data = map; gntdev_mmap()
815 map->vma = vma; gntdev_mmap()
818 if ((vma->vm_flags & VM_WRITE) && gntdev_mmap()
823 if (!(vma->vm_flags & VM_WRITE)) gntdev_mmap()
830 err = apply_to_page_range(vma->vm_mm, vma->vm_start, gntdev_mmap()
831 vma->vm_end - vma->vm_start, gntdev_mmap()
845 err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE, gntdev_mmap()
862 apply_to_page_range(vma->vm_mm, vma->vm_start, gntdev_mmap()
863 vma->vm_end - vma->vm_start, gntdev_mmap()
867 map->pages_vm_start = vma->vm_start; gntdev_mmap()
880 map->vma = NULL; gntdev_mmap()
/linux-4.4.14/arch/blackfin/include/asm/
H A Dtlb.h10 #define tlb_start_vma(tlb, vma) do { } while (0)
11 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.4.14/fs/ext2/
H A Dfile.c42 static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ext2_dax_fault() argument
44 struct inode *inode = file_inode(vma->vm_file); ext2_dax_fault()
50 file_update_time(vma->vm_file); ext2_dax_fault()
54 ret = __dax_fault(vma, vmf, ext2_get_block, NULL); ext2_dax_fault()
62 static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, ext2_dax_pmd_fault() argument
65 struct inode *inode = file_inode(vma->vm_file); ext2_dax_pmd_fault()
71 file_update_time(vma->vm_file); ext2_dax_pmd_fault()
75 ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL); ext2_dax_pmd_fault()
83 static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ext2_dax_mkwrite() argument
85 struct inode *inode = file_inode(vma->vm_file); ext2_dax_mkwrite()
90 file_update_time(vma->vm_file); ext2_dax_mkwrite()
93 ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL); ext2_dax_mkwrite()
100 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, ext2_dax_pfn_mkwrite() argument
103 struct inode *inode = file_inode(vma->vm_file); ext2_dax_pfn_mkwrite()
109 file_update_time(vma->vm_file); ext2_dax_pfn_mkwrite()
129 static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) ext2_file_mmap() argument
132 return generic_file_mmap(file, vma); ext2_file_mmap()
135 vma->vm_ops = &ext2_dax_vm_ops; ext2_file_mmap()
136 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; ext2_file_mmap()
/linux-4.4.14/fs/proc/
H A Dtask_nommu.c20 struct vm_area_struct *vma; task_mem() local
27 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_mem()
29 bytes += kobjsize(vma); task_mem()
31 region = vma->vm_region; task_mem()
36 size = vma->vm_end - vma->vm_start; task_mem()
40 vma->vm_flags & VM_MAYSHARE) { task_mem()
45 slack = region->vm_end - vma->vm_end; task_mem()
82 struct vm_area_struct *vma; task_vsize() local
88 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_vsize()
89 vsize += vma->vm_end - vma->vm_start; task_vsize()
99 struct vm_area_struct *vma; task_statm() local
106 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_statm()
107 size += kobjsize(vma); task_statm()
108 region = vma->vm_region; task_statm()
127 struct vm_area_struct *vma, bool is_pid) pid_of_stack()
136 task = task_of_stack(task, vma, is_pid); pid_of_stack()
148 static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, nommu_vma_show() argument
151 struct mm_struct *mm = vma->vm_mm; nommu_vma_show()
159 flags = vma->vm_flags; nommu_vma_show()
160 file = vma->vm_file; nommu_vma_show()
163 struct inode *inode = file_inode(vma->vm_file); nommu_vma_show()
166 pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; nommu_vma_show()
172 vma->vm_start, nommu_vma_show()
173 vma->vm_end, nommu_vma_show()
185 pid_t tid = pid_of_stack(priv, vma, is_pid); nommu_vma_show()
193 if (!is_pid || (vma->vm_start <= mm->start_stack && nommu_vma_show()
194 vma->vm_end >= mm->start_stack)) nommu_vma_show()
126 pid_of_stack(struct proc_maps_private *priv, struct vm_area_struct *vma, bool is_pid) pid_of_stack() argument
H A Dtask_mmu.c129 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) m_next_vma() argument
131 if (vma == priv->tail_vma) m_next_vma()
133 return vma->vm_next ?: priv->tail_vma; m_next_vma()
136 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) m_cache_vma() argument
138 if (m->count < m->size) /* vma is copied successfully */ m_cache_vma()
139 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; m_cache_vma()
147 struct vm_area_struct *vma; m_start() local
167 vma = find_vma(mm, last_addr); m_start()
168 if (vma && (vma = m_next_vma(priv, vma))) m_start()
169 return vma; m_start()
174 for (vma = mm->mmap; pos; pos--) { m_start()
175 m->version = vma->vm_start; m_start()
176 vma = vma->vm_next; m_start()
178 return vma; m_start()
252 struct vm_area_struct *vma, bool is_pid) pid_of_stack()
261 task = task_of_stack(task, vma, is_pid); pid_of_stack()
271 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) show_map_vma() argument
273 struct mm_struct *mm = vma->vm_mm; show_map_vma()
274 struct file *file = vma->vm_file; show_map_vma()
276 vm_flags_t flags = vma->vm_flags; show_map_vma()
284 struct inode *inode = file_inode(vma->vm_file); show_map_vma()
287 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; show_map_vma()
291 start = vma->vm_start; show_map_vma()
292 if (stack_guard_page_start(vma, start)) show_map_vma()
294 end = vma->vm_end; show_map_vma()
295 if (stack_guard_page_end(vma, end)) show_map_vma()
319 if (vma->vm_ops && vma->vm_ops->name) { show_map_vma()
320 name = vma->vm_ops->name(vma); show_map_vma()
325 name = arch_vma_name(vma); show_map_vma()
334 if (vma->vm_start <= mm->brk && show_map_vma()
335 vma->vm_end >= mm->start_brk) { show_map_vma()
340 tid = pid_of_stack(priv, vma, is_pid); show_map_vma()
346 if (!is_pid || (vma->vm_start <= mm->start_stack && show_map_vma()
347 vma->vm_end >= mm->start_stack)) { show_map_vma()
492 struct vm_area_struct *vma = walk->vma; smaps_pte_entry() local
496 page = vm_normal_page(vma, addr, *pte); smaps_pte_entry()
527 struct vm_area_struct *vma = walk->vma; smaps_pmd_entry() local
531 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); smaps_pmd_entry()
548 struct vm_area_struct *vma = walk->vma; smaps_pte_range() local
552 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { smaps_pte_range()
565 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); smaps_pte_range()
573 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) show_smap_vma_flags() argument
623 if (vma->vm_flags & (1UL << i)) { show_smap_vma_flags()
637 struct vm_area_struct *vma = walk->vma; smaps_hugetlb_range() local
641 page = vm_normal_page(vma, addr, *pte); smaps_hugetlb_range()
652 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); smaps_hugetlb_range()
654 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); smaps_hugetlb_range()
662 struct vm_area_struct *vma = v; show_smap() local
669 .mm = vma->vm_mm, show_smap()
675 walk_page_vma(vma, &smaps_walk); show_smap()
677 show_map_vma(m, vma, is_pid); show_smap()
697 (vma->vm_end - vma->vm_start) >> 10, show_smap()
711 vma_kernel_pagesize(vma) >> 10, show_smap()
712 vma_mmu_pagesize(vma) >> 10, show_smap()
713 (vma->vm_flags & VM_LOCKED) ? show_smap()
716 show_smap_vma_flags(m, vma); show_smap()
717 m_cache_vma(m, vma); show_smap()
783 static inline void clear_soft_dirty(struct vm_area_struct *vma, clear_soft_dirty() argument
795 ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte); clear_soft_dirty()
798 ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent); clear_soft_dirty()
801 set_pte_at(vma->vm_mm, addr, pte, ptent); clear_soft_dirty()
805 static inline void clear_soft_dirty(struct vm_area_struct *vma, clear_soft_dirty() argument
812 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, clear_soft_dirty_pmd() argument
815 pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); clear_soft_dirty_pmd()
820 if (vma->vm_flags & VM_SOFTDIRTY) clear_soft_dirty_pmd()
821 vma->vm_flags &= ~VM_SOFTDIRTY; clear_soft_dirty_pmd()
823 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); clear_soft_dirty_pmd()
826 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, clear_soft_dirty_pmd() argument
836 struct vm_area_struct *vma = walk->vma; clear_refs_pte_range() local
841 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { clear_refs_pte_range()
843 clear_soft_dirty_pmd(vma, addr, pmd); clear_refs_pte_range()
850 pmdp_test_and_clear_young(vma, addr, pmd); clear_refs_pte_range()
861 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); clear_refs_pte_range()
866 clear_soft_dirty(vma, addr, pte); clear_refs_pte_range()
873 page = vm_normal_page(vma, addr, ptent); clear_refs_pte_range()
878 ptep_test_and_clear_young(vma, addr, pte); clear_refs_pte_range()
891 struct vm_area_struct *vma = walk->vma; clear_refs_test_walk() local
893 if (vma->vm_flags & VM_PFNMAP) clear_refs_test_walk()
902 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) clear_refs_test_walk()
904 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) clear_refs_test_walk()
915 struct vm_area_struct *vma; clear_refs_write() local
960 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
961 if (!(vma->vm_flags & VM_SOFTDIRTY)) clear_refs_write()
965 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
966 vma->vm_flags &= ~VM_SOFTDIRTY; clear_refs_write()
967 vma_set_page_prot(vma); clear_refs_write()
1038 struct vm_area_struct *vma = find_vma(walk->mm, addr); pagemap_pte_hole() local
1043 if (vma) pagemap_pte_hole()
1044 hole_end = min(end, vma->vm_start); pagemap_pte_hole()
1054 if (!vma) pagemap_pte_hole()
1058 if (vma->vm_flags & VM_SOFTDIRTY) pagemap_pte_hole()
1060 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { pagemap_pte_hole()
1071 struct vm_area_struct *vma, unsigned long addr, pte_t pte) pte_to_pagemap_entry()
1080 page = vm_normal_page(vma, addr, pte); pte_to_pagemap_entry()
1099 if (vma->vm_flags & VM_SOFTDIRTY) pte_to_pagemap_entry()
1108 struct vm_area_struct *vma = walk->vma; pagemap_pmd_range() local
1115 if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) { pagemap_pmd_range()
1119 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd)) pagemap_pmd_range()
1158 * We can assume that @vma always points to a valid one and @end never pagemap_pmd_range()
1159 * goes beyond vma->vm_end. pagemap_pmd_range()
1165 pme = pte_to_pagemap_entry(pm, vma, addr, *pte); pagemap_pmd_range()
1184 struct vm_area_struct *vma = walk->vma; pagemap_hugetlb_range() local
1189 if (vma->vm_flags & VM_SOFTDIRTY) pagemap_hugetlb_range()
1415 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, can_gather_numa_stats() argument
1424 page = vm_normal_page(vma, addr, pte); can_gather_numa_stats()
1440 struct vm_area_struct *vma, can_gather_numa_stats_pmd()
1449 page = vm_normal_page_pmd(vma, addr, pmd); can_gather_numa_stats_pmd()
1468 struct vm_area_struct *vma = walk->vma; gather_pte_stats() local
1474 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { gather_pte_stats()
1477 page = can_gather_numa_stats_pmd(*pmd, vma, addr); gather_pte_stats()
1490 struct page *page = can_gather_numa_stats(*pte, vma, addr); gather_pte_stats()
1534 struct vm_area_struct *vma = v; show_numa_map() local
1536 struct file *file = vma->vm_file; show_numa_map()
1537 struct mm_struct *mm = vma->vm_mm; show_numa_map()
1554 pol = __get_vma_policy(vma, vma->vm_start); show_numa_map()
1562 seq_printf(m, "%08lx %s", vma->vm_start, buffer); show_numa_map()
1567 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { show_numa_map()
1570 pid_t tid = pid_of_stack(proc_priv, vma, is_pid); show_numa_map()
1576 if (!is_pid || (vma->vm_start <= mm->start_stack && show_numa_map()
1577 vma->vm_end >= mm->start_stack)) show_numa_map()
1584 if (is_vm_hugetlb_page(vma)) show_numa_map()
1588 walk_page_vma(vma, &walk); show_numa_map()
1608 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) show_numa_map()
1618 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); show_numa_map()
1621 m_cache_vma(m, vma); show_numa_map()
251 pid_of_stack(struct proc_maps_private *priv, struct vm_area_struct *vma, bool is_pid) pid_of_stack() argument
1070 pte_to_pagemap_entry(struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) pte_to_pagemap_entry() argument
1439 can_gather_numa_stats_pmd(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) can_gather_numa_stats_pmd() argument
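The fs/proc snippets above all walk a task's mappings by following mm->mmap and vm_next while holding mmap_sem. As a minimal sketch of that traversal, in the spirit of the loops in clear_refs_write() above, the hypothetical helper below sums the size of every VMA in an mm; the name total_vma_bytes is not from the tree searched here.

#include <linux/mm.h>
#include <linux/mm_types.h>

/* Hypothetical helper: total bytes mapped in an mm, found by walking
 * the vm_next list the way clear_refs_write() does above.  The caller
 * must hold mm->mmap_sem at least for reading. */
static unsigned long total_vma_bytes(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long bytes = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                bytes += vma->vm_end - vma->vm_start;

        return bytes;
}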
/linux-4.4.14/drivers/sbus/char/
H A Dflash.c36 flash_mmap(struct file *file, struct vm_area_struct *vma) flash_mmap() argument
46 if ((vma->vm_flags & VM_READ) && flash_mmap()
47 (vma->vm_flags & VM_WRITE)) { flash_mmap()
51 if (vma->vm_flags & VM_READ) { flash_mmap()
54 } else if (vma->vm_flags & VM_WRITE) { flash_mmap()
64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); flash_mmap()
71 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); flash_mmap()
73 if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot)) flash_mmap()
/linux-4.4.14/arch/alpha/kernel/
H A Dpci-sysfs.c18 struct vm_area_struct *vma, hose_mmap_page_range()
28 vma->vm_pgoff += base >> PAGE_SHIFT; hose_mmap_page_range()
30 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, hose_mmap_page_range()
31 vma->vm_end - vma->vm_start, hose_mmap_page_range()
32 vma->vm_page_prot); hose_mmap_page_range()
36 struct vm_area_struct *vma, int sparse) __pci_mmap_fits()
41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __pci_mmap_fits()
42 start = vma->vm_pgoff; __pci_mmap_fits()
58 * @vma: struct vm_area_struct passed into the mmap
65 struct vm_area_struct *vma, int sparse) pci_mmap_resource()
80 if (!__pci_mmap_fits(pdev, i, vma, sparse)) pci_mmap_resource()
87 vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); pci_mmap_resource()
90 return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); pci_mmap_resource()
95 struct vm_area_struct *vma) pci_mmap_resource_sparse()
97 return pci_mmap_resource(kobj, attr, vma, 1); pci_mmap_resource_sparse()
102 struct vm_area_struct *vma) pci_mmap_resource_dense()
104 return pci_mmap_resource(kobj, attr, vma, 0); pci_mmap_resource_dense()
253 struct vm_area_struct *vma, __legacy_mmap_fits()
258 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __legacy_mmap_fits()
259 start = vma->vm_pgoff; __legacy_mmap_fits()
282 int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, pci_mmap_legacy_page_range() argument
291 if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) pci_mmap_legacy_page_range()
294 return hose_mmap_page_range(hose, vma, mmap_type, sparse); pci_mmap_legacy_page_range()
17 hose_mmap_page_range(struct pci_controller *hose, struct vm_area_struct *vma, enum pci_mmap_state mmap_type, int sparse) hose_mmap_page_range() argument
35 __pci_mmap_fits(struct pci_dev *pdev, int num, struct vm_area_struct *vma, int sparse) __pci_mmap_fits() argument
63 pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int sparse) pci_mmap_resource() argument
93 pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) pci_mmap_resource_sparse() argument
100 pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) pci_mmap_resource_dense() argument
252 __legacy_mmap_fits(struct pci_controller *hose, struct vm_area_struct *vma, unsigned long res_size, int sparse) __legacy_mmap_fits() argument
/linux-4.4.14/drivers/gpu/drm/ttm/
H A Dttm_bo_vm.c45 struct vm_area_struct *vma, ttm_bo_vm_fault_idle()
69 up_read(&vma->vm_mm->mmap_sem); ttm_bo_vm_fault_idle()
86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ttm_bo_vm_fault() argument
89 vma->vm_private_data; ttm_bo_vm_fault()
117 up_read(&vma->vm_mm->mmap_sem); ttm_bo_vm_fault()
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); ttm_bo_vm_fault()
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + ttm_bo_vm_fault()
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); ttm_bo_vm_fault()
179 page_last = vma_pages(vma) + vma->vm_pgoff - ttm_bo_vm_fault()
188 * Make a local vma copy to modify the page_prot member ttm_bo_vm_fault()
189 * and vm_flags if necessary. The vma parameter is protected ttm_bo_vm_fault()
192 cvma = *vma; ttm_bo_vm_fault()
225 page->mapping = vma->vm_file->f_mapping; ttm_bo_vm_fault()
231 if (vma->vm_flags & VM_MIXEDMAP) ttm_bo_vm_fault()
260 static void ttm_bo_vm_open(struct vm_area_struct *vma) ttm_bo_vm_open() argument
263 (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_vm_open()
265 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); ttm_bo_vm_open()
270 static void ttm_bo_vm_close(struct vm_area_struct *vma) ttm_bo_vm_close() argument
272 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_vm_close()
275 vma->vm_private_data = NULL; ttm_bo_vm_close()
308 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ttm_bo_mmap() argument
315 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); ttm_bo_mmap()
328 vma->vm_ops = &ttm_bo_vm_ops; ttm_bo_mmap()
332 * vma->vm_private_data here. ttm_bo_mmap()
335 vma->vm_private_data = bo; ttm_bo_mmap()
339 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, ttm_bo_mmap()
344 vma->vm_flags |= VM_MIXEDMAP; ttm_bo_mmap()
345 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; ttm_bo_mmap()
353 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) ttm_fbdev_mmap() argument
355 if (vma->vm_pgoff != 0) ttm_fbdev_mmap()
358 vma->vm_ops = &ttm_bo_vm_ops; ttm_fbdev_mmap()
359 vma->vm_private_data = ttm_bo_reference(bo); ttm_fbdev_mmap()
360 vma->vm_flags |= VM_MIXEDMAP; ttm_fbdev_mmap()
361 vma->vm_flags |= VM_IO | VM_DONTEXPAND; ttm_fbdev_mmap()
44 ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct vm_area_struct *vma, struct vm_fault *vmf) ttm_bo_vm_fault_idle() argument
/linux-4.4.14/arch/cris/arch-v32/drivers/pci/
H A Dbios.c17 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, pci_mmap_page_range() argument
25 prot = pgprot_val(vma->vm_page_prot); pci_mmap_page_range()
26 vma->vm_page_prot = __pgprot(prot); pci_mmap_page_range()
31 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_page_range()
32 vma->vm_end - vma->vm_start, pci_mmap_page_range()
33 vma->vm_page_prot)) pci_mmap_page_range()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
H A Dllite_mmap.c57 struct vm_area_struct *vma, unsigned long addr, policy_from_vma()
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + policy_from_vma()
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); policy_from_vma()
69 struct vm_area_struct *vma, *ret = NULL; our_vma() local
74 for (vma = find_vma(mm, addr); our_vma()
75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { our_vma()
76 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && our_vma()
77 vma->vm_flags & VM_SHARED) { our_vma()
78 ret = vma; our_vma()
87 * \param vma - virtual memory area addressed to page fault
91 * \parm ra_flags - vma readahead flags.
98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, ll_fault_io_init() argument
102 struct file *file = vma->vm_file; ll_fault_io_init()
132 fio->ft_executable = vma->vm_flags&VM_EXEC; ll_fault_io_init()
140 *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); ll_fault_io_init()
141 vma->vm_flags &= ~VM_SEQ_READ; ll_fault_io_init()
142 vma->vm_flags |= VM_RAND_READ; ll_fault_io_init()
144 CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags, ll_fault_io_init()
169 static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, ll_page_mkwrite0() argument
183 io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); ll_page_mkwrite0()
197 vio->u.fault.ft_vma = vma; ll_page_mkwrite0()
216 struct inode *inode = file_inode(vma->vm_file); ll_page_mkwrite0()
284 * \param vma - is virtual area struct related to page fault
291 static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) ll_fault0() argument
302 io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); ll_fault0()
309 vio->u.fault.ft_vma = vma; ll_fault0()
331 vma->vm_flags |= ra_flags; ll_fault0()
340 static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ll_fault() argument
353 result = ll_fault0(vma, vmf); ll_fault()
380 static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ll_page_mkwrite() argument
389 result = ll_page_mkwrite0(vma, vmf->page, &retry); ll_page_mkwrite()
394 file_inode(vma->vm_file)->i_ino); ll_page_mkwrite()
424 * we track the mapped vma count in ccc_object::cob_mmap_cnt.
426 static void ll_vm_open(struct vm_area_struct *vma) ll_vm_open() argument
428 struct inode *inode = file_inode(vma->vm_file); ll_vm_open()
431 LASSERT(vma->vm_file); ll_vm_open()
439 static void ll_vm_close(struct vm_area_struct *vma) ll_vm_close() argument
441 struct inode *inode = file_inode(vma->vm_file); ll_vm_close()
444 LASSERT(vma->vm_file); ll_vm_close()
472 int ll_file_mmap(struct file *file, struct vm_area_struct *vma) ll_file_mmap() argument
481 rc = generic_file_mmap(file, vma); ll_file_mmap()
483 vma->vm_ops = &ll_file_vm_ops; ll_file_mmap()
484 vma->vm_ops->open(vma); ll_file_mmap()
56 policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, unsigned long addr, size_t count) policy_from_vma() argument
/linux-4.4.14/drivers/gpu/drm/i915/
H A Di915_gem_evict.c37 mark_free(struct i915_vma *vma, struct list_head *unwind) mark_free() argument
39 if (vma->pin_count) mark_free()
42 if (WARN_ON(!list_empty(&vma->exec_list))) mark_free()
45 list_add(&vma->exec_list, unwind); mark_free()
46 return drm_mm_scan_add_block(&vma->node); mark_free()
64 * This function is used by the object/vma binding code.
80 struct i915_vma *vma; i915_gem_evict_something() local
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { i915_gem_evict_something()
120 if (mark_free(vma, &unwind_list)) i915_gem_evict_something()
128 list_for_each_entry(vma, &vm->active_list, mm_list) { i915_gem_evict_something()
129 if (mark_free(vma, &unwind_list)) i915_gem_evict_something()
136 vma = list_first_entry(&unwind_list, i915_gem_evict_something()
139 ret = drm_mm_scan_remove_block(&vma->node); i915_gem_evict_something()
142 list_del_init(&vma->exec_list); i915_gem_evict_something()
173 vma = list_first_entry(&unwind_list, i915_gem_evict_something()
176 if (drm_mm_scan_remove_block(&vma->node)) { i915_gem_evict_something()
177 list_move(&vma->exec_list, &eviction_list); i915_gem_evict_something()
178 drm_gem_object_reference(&vma->obj->base); i915_gem_evict_something()
181 list_del_init(&vma->exec_list); i915_gem_evict_something()
187 vma = list_first_entry(&eviction_list, i915_gem_evict_something()
191 obj = &vma->obj->base; i915_gem_evict_something()
192 list_del_init(&vma->exec_list); i915_gem_evict_something()
194 ret = i915_vma_unbind(vma); i915_gem_evict_something()
218 struct i915_vma *vma, *next; i915_gem_evict_vm() local
234 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) i915_gem_evict_vm()
235 if (vma->pin_count == 0) i915_gem_evict_vm()
236 WARN_ON(i915_vma_unbind(vma)); i915_gem_evict_vm()
H A Di915_gem_execbuffer.c132 struct i915_vma *vma; eb_lookup_vmas() local
142 * lookup_or_create exists as an interface to get at the vma eb_lookup_vmas()
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); eb_lookup_vmas()
147 if (IS_ERR(vma)) { eb_lookup_vmas()
149 ret = PTR_ERR(vma); eb_lookup_vmas()
154 list_add_tail(&vma->exec_list, &eb->vmas); eb_lookup_vmas()
157 vma->exec_entry = &exec[i]; eb_lookup_vmas()
159 eb->lut[i] = vma; eb_lookup_vmas()
162 vma->exec_handle = handle; eb_lookup_vmas()
163 hlist_add_head(&vma->exec_node, eb_lookup_vmas()
200 struct i915_vma *vma; hlist_for_each() local
202 vma = hlist_entry(node, struct i915_vma, exec_node); hlist_for_each()
203 if (vma->exec_handle == handle) hlist_for_each()
204 return vma; hlist_for_each()
211 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) i915_gem_execbuffer_unreserve_vma() argument
214 struct drm_i915_gem_object *obj = vma->obj; i915_gem_execbuffer_unreserve_vma()
216 if (!drm_mm_node_allocated(&vma->node)) i915_gem_execbuffer_unreserve_vma()
219 entry = vma->exec_entry; i915_gem_execbuffer_unreserve_vma()
225 vma->pin_count--; i915_gem_execbuffer_unreserve_vma()
233 struct i915_vma *vma; eb_destroy() local
235 vma = list_first_entry(&eb->vmas, eb_destroy()
238 list_del_init(&vma->exec_list); eb_destroy()
239 i915_gem_execbuffer_unreserve_vma(vma); eb_destroy()
240 drm_gem_object_unreference(&vma->obj->base); eb_destroy()
486 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, i915_gem_execbuffer_relocate_vma() argument
492 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_relocate_vma()
511 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); i915_gem_execbuffer_relocate_vma()
532 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, i915_gem_execbuffer_relocate_vma_slow() argument
536 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_relocate_vma_slow()
540 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); i915_gem_execbuffer_relocate_vma_slow()
551 struct i915_vma *vma; i915_gem_execbuffer_relocate() local
562 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate()
563 ret = i915_gem_execbuffer_relocate_vma(vma, eb); i915_gem_execbuffer_relocate()
579 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, i915_gem_execbuffer_reserve_vma() argument
583 struct drm_i915_gem_object *obj = vma->obj; i915_gem_execbuffer_reserve_vma()
584 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_reserve_vma()
592 if (!drm_mm_node_allocated(&vma->node)) { i915_gem_execbuffer_reserve_vma()
606 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); i915_gem_execbuffer_reserve_vma()
609 ret = i915_gem_object_pin(obj, vma->vm, i915_gem_execbuffer_reserve_vma()
626 if (entry->offset != vma->node.start) { i915_gem_execbuffer_reserve_vma()
627 entry->offset = vma->node.start; i915_gem_execbuffer_reserve_vma()
640 need_reloc_mappable(struct i915_vma *vma) need_reloc_mappable() argument
642 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; need_reloc_mappable()
647 if (!i915_is_ggtt(vma->vm)) need_reloc_mappable()
651 if (HAS_LLC(vma->obj->base.dev)) need_reloc_mappable()
654 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) need_reloc_mappable()
661 eb_vma_misplaced(struct i915_vma *vma) eb_vma_misplaced() argument
663 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; eb_vma_misplaced()
664 struct drm_i915_gem_object *obj = vma->obj; eb_vma_misplaced()
667 !i915_is_ggtt(vma->vm)); eb_vma_misplaced()
670 vma->node.start & (entry->alignment - 1)) eb_vma_misplaced()
674 vma->node.start < BATCH_OFFSET_BIAS) eb_vma_misplaced()
682 (vma->node.start + vma->node.size - 1) >> 32) eb_vma_misplaced()
695 struct i915_vma *vma; i915_gem_execbuffer_reserve() local
710 vma = list_first_entry(vmas, struct i915_vma, exec_list); i915_gem_execbuffer_reserve()
711 obj = vma->obj; i915_gem_execbuffer_reserve()
712 entry = vma->exec_entry; i915_gem_execbuffer_reserve()
722 need_mappable = need_fence || need_reloc_mappable(vma); i915_gem_execbuffer_reserve()
726 list_move(&vma->exec_list, &ordered_vmas); i915_gem_execbuffer_reserve()
728 list_move_tail(&vma->exec_list, &ordered_vmas); i915_gem_execbuffer_reserve()
752 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
753 if (!drm_mm_node_allocated(&vma->node)) list_for_each_entry()
756 if (eb_vma_misplaced(vma)) list_for_each_entry()
757 ret = i915_vma_unbind(vma); list_for_each_entry()
759 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); list_for_each_entry()
765 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
766 if (drm_mm_node_allocated(&vma->node)) list_for_each_entry()
769 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); list_for_each_entry()
779 list_for_each_entry(vma, vmas, exec_list)
780 i915_gem_execbuffer_unreserve_vma(vma);
799 struct i915_vma *vma; i915_gem_execbuffer_relocate_slow() local
809 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); i915_gem_execbuffer_relocate_slow()
810 list_del_init(&vma->exec_list); i915_gem_execbuffer_relocate_slow()
811 i915_gem_execbuffer_unreserve_vma(vma); i915_gem_execbuffer_relocate_slow()
812 drm_gem_object_unreference(&vma->obj->base); i915_gem_execbuffer_relocate_slow()
885 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate_slow()
886 int offset = vma->exec_entry - exec; i915_gem_execbuffer_relocate_slow()
887 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, i915_gem_execbuffer_relocate_slow()
910 struct i915_vma *vma; i915_gem_execbuffer_move_to_gpu() local
915 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
916 struct drm_i915_gem_object *obj = vma->obj; list_for_each_entry()
1052 struct i915_vma *vma; i915_gem_execbuffer_move_to_active() local
1054 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
1055 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; list_for_each_entry()
1056 struct drm_i915_gem_object *obj = vma->obj; list_for_each_entry()
1066 i915_vma_move_to_active(vma, req); list_for_each_entry()
1136 struct i915_vma *vma; i915_gem_execbuffer_parse() local
1161 vma = i915_gem_obj_to_ggtt(shadow_batch_obj); i915_gem_execbuffer_parse()
1162 vma->exec_entry = shadow_exec_entry; i915_gem_execbuffer_parse()
1163 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; i915_gem_execbuffer_parse()
1165 list_add_tail(&vma->exec_list, &eb->vmas); i915_gem_execbuffer_parse()
1309 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); eb_get_batch() local
1320 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; eb_get_batch()
1322 return vma->obj; eb_get_batch()
1579 * batch vma for correctness. For less ugly and less fragility this i915_gem_do_execbuffer()
1580 * needs to be adjusted to also track the ggtt batch vma properly as i915_gem_do_execbuffer()
/linux-4.4.14/drivers/gpu/drm/udl/
H A Dudl_gem.c61 struct vm_area_struct *vma) update_vm_cache_attr()
67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); update_vm_cache_attr()
69 vma->vm_page_prot = update_vm_cache_attr()
70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
72 vma->vm_page_prot = update_vm_cache_attr()
73 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
87 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) udl_drm_gem_mmap() argument
91 ret = drm_gem_mmap(filp, vma); udl_drm_gem_mmap()
95 vma->vm_flags &= ~VM_PFNMAP; udl_drm_gem_mmap()
96 vma->vm_flags |= VM_MIXEDMAP; udl_drm_gem_mmap()
98 update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma); udl_drm_gem_mmap()
103 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) udl_gem_fault() argument
105 struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); udl_gem_fault()
110 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> udl_gem_fault()
117 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); udl_gem_fault()
60 update_vm_cache_attr(struct udl_gem_object *obj, struct vm_area_struct *vma) update_vm_cache_attr() argument
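udl_gem_fault() above shows the other common mapping strategy: leave the range empty at mmap time and insert pages lazily from a .fault handler. The sketch below only illustrates the vmf->virtual_address arithmetic and the vm_insert_page() return-code handling seen in that snippet; mydev_object, its pages array and mydev_fault are hypothetical, and the mmap handler is assumed to have set VM_MIXEDMAP and vm_private_data, as udl_drm_gem_mmap() does.

#include <linux/mm.h>

struct mydev_object {                   /* hypothetical backing object */
        struct page **pages;            /* pinned backing pages */
        unsigned long num_pages;
};

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct mydev_object *obj = vma->vm_private_data;
        unsigned long offset;
        int ret;

        /* page index of the faulting address within this VMA */
        offset = ((unsigned long)vmf->virtual_address - vma->vm_start)
                 >> PAGE_SHIFT;
        if (!obj->pages || offset >= obj->num_pages)
                return VM_FAULT_SIGBUS;

        ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                             obj->pages[offset]);
        switch (ret) {
        case 0:
        case -EBUSY:                    /* raced with another inserter */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}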
/linux-4.4.14/arch/microblaze/include/asm/
H A Dtlb.h19 #define tlb_start_vma(tlb, vma) do { } while (0)
20 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h33 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
36 static inline void local_flush_tlb_range(struct vm_area_struct *vma, local_flush_tlb_range() argument
42 #define update_mmu_cache(vma, addr, ptep) do { } while (0)
62 #define flush_tlb_page(vma, addr) BUG()
H A Dcacheflush.h64 #define flush_icache_user_range(vma, pg, adr, len) flush_icache();
65 #define flush_icache_page(vma, pg) do { } while (0)
92 #define flush_cache_page(vma, vmaddr, pfn) \
97 #define flush_cache_range(vma, start, len) { \
103 #define flush_cache_range(vma, start, len) do { } while (0)
105 static inline void copy_to_user_page(struct vm_area_struct *vma, copy_to_user_page() argument
111 if (vma->vm_flags & VM_EXEC) { copy_to_user_page()
117 static inline void copy_from_user_page(struct vm_area_struct *vma, copy_from_user_page() argument
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_mmap.c23 * @vma: VM area struct
27 struct vm_area_struct *vma; member in struct:scif_vma_info
79 struct vm_area_struct *vma; __scif_zap_mmaps() local
85 vma = info->vma; __scif_zap_mmaps()
86 size = vma->vm_end - vma->vm_start; __scif_zap_mmaps()
87 zap_vma_ptes(vma, vma->vm_start, size); __scif_zap_mmaps()
89 "%s ep %p zap vma %p size 0x%lx\n", __scif_zap_mmaps()
90 __func__, ep, info->vma, size); __scif_zap_mmaps()
169 static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma) scif_insert_vma() argument
179 info->vma = vma; scif_insert_vma()
188 static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma) scif_delete_vma() argument
196 if (info->vma == vma) { scif_delete_vma()
382 int nr_pages, struct vm_area_struct *vma) scif_rma_list_mmap()
407 err = remap_pfn_range(vma, list_for_each_entry_from()
408 vma->vm_start + list_for_each_entry_from()
412 vma->vm_page_prot); list_for_each_entry_from()
508 * else the offset within the vma is
523 * @vma: VMM memory area.
533 static void scif_vma_open(struct vm_area_struct *vma) scif_vma_open() argument
535 struct vma_pvt *vmapvt = vma->vm_private_data; scif_vma_open()
538 "SCIFAPI vma open: vma_start 0x%lx vma_end 0x%lx\n", scif_vma_open()
539 vma->vm_start, vma->vm_end); scif_vma_open()
540 scif_insert_vma(vmapvt->ep, vma); scif_vma_open()
546 * @vma: VMM memory area.
551 static void scif_munmap(struct vm_area_struct *vma) scif_munmap() argument
554 struct vma_pvt *vmapvt = vma->vm_private_data; scif_munmap()
555 int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; scif_munmap()
564 vma->vm_start, vma->vm_end); scif_munmap()
567 (vma->vm_pgoff) << PAGE_SHIFT; scif_munmap()
573 req.nr_bytes = vma->vm_end - vma->vm_start; scif_munmap()
574 req.prot = vma->vm_flags & (VM_READ | VM_WRITE); scif_munmap()
592 vma->vm_ops = NULL; scif_munmap()
593 vma->vm_private_data = NULL; scif_munmap()
595 scif_delete_vma(ep, vma); scif_munmap()
605 * @vma: VMM memory area.
611 int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd) scif_mmap() argument
616 s64 start_offset = vma->vm_pgoff << PAGE_SHIFT; scif_mmap()
617 int nr_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; scif_mmap()
630 err = scif_insert_vma(ep, vma); scif_mmap()
636 scif_delete_vma(ep, vma); scif_mmap()
645 req.nr_bytes = vma->vm_end - vma->vm_start; scif_mmap()
646 req.prot = vma->vm_flags & (VM_READ | VM_WRITE); scif_mmap()
661 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); scif_mmap()
664 * VM_DONTCOPY - Do not copy this vma on fork scif_mmap()
675 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; scif_mmap()
678 vma->vm_flags |= VM_IO | VM_PFNMAP; scif_mmap()
681 err = scif_rma_list_mmap(window, start_offset, nr_pages, vma); scif_mmap()
688 vma->vm_ops = &scif_vm_ops; scif_mmap()
689 vma->vm_private_data = vmapvt; scif_mmap()
696 scif_delete_vma(ep, vma); scif_mmap()
381 scif_rma_list_mmap(struct scif_window *start_window, s64 offset, int nr_pages, struct vm_area_struct *vma) scif_rma_list_mmap() argument
/linux-4.4.14/fs/
H A Duserfaultfd.c260 int handle_userfault(struct vm_area_struct *vma, unsigned long address, handle_userfault() argument
263 struct mm_struct *mm = vma->vm_mm; handle_userfault()
272 ctx = vma->vm_userfaultfd_ctx.ctx; handle_userfault()
430 struct vm_area_struct *vma, *prev; userfaultfd_release() local
440 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx userfaultfd_release()
447 for (vma = mm->mmap; vma; vma = vma->vm_next) { userfaultfd_release()
449 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ userfaultfd_release()
450 !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); userfaultfd_release()
451 if (vma->vm_userfaultfd_ctx.ctx != ctx) { userfaultfd_release()
452 prev = vma; userfaultfd_release()
455 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); userfaultfd_release()
456 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, userfaultfd_release()
457 new_flags, vma->anon_vma, userfaultfd_release()
458 vma->vm_file, vma->vm_pgoff, userfaultfd_release()
459 vma_policy(vma), userfaultfd_release()
462 vma = prev; userfaultfd_release()
464 prev = vma; userfaultfd_release()
465 vma->vm_flags = new_flags; userfaultfd_release()
466 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; userfaultfd_release()
721 struct vm_area_struct *vma, *prev, *cur; userfaultfd_register() local
764 vma = find_vma_prev(mm, start, &prev); userfaultfd_register()
767 if (!vma) userfaultfd_register()
770 /* check that there's at least one vma in the range */ userfaultfd_register()
772 if (vma->vm_start >= end) userfaultfd_register()
783 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { userfaultfd_register()
795 * Check that this vma isn't already owned by a userfaultfd_register()
797 * userfaultfd to own a single vma simultaneously or we userfaultfd_register()
809 if (vma->vm_start < start) userfaultfd_register()
810 prev = vma; userfaultfd_register()
816 BUG_ON(vma->vm_ops); userfaultfd_register()
817 BUG_ON(vma->vm_userfaultfd_ctx.ctx && userfaultfd_register()
818 vma->vm_userfaultfd_ctx.ctx != ctx); userfaultfd_register()
821 * Nothing to do: this vma is already registered into this userfaultfd_register()
824 if (vma->vm_userfaultfd_ctx.ctx == ctx && userfaultfd_register()
825 (vma->vm_flags & vm_flags) == vm_flags) userfaultfd_register()
828 if (vma->vm_start > start) userfaultfd_register()
829 start = vma->vm_start; userfaultfd_register()
830 vma_end = min(end, vma->vm_end); userfaultfd_register()
832 new_flags = (vma->vm_flags & ~vm_flags) | vm_flags; userfaultfd_register()
834 vma->anon_vma, vma->vm_file, vma->vm_pgoff, userfaultfd_register()
835 vma_policy(vma), userfaultfd_register()
838 vma = prev; userfaultfd_register()
841 if (vma->vm_start < start) { userfaultfd_register()
842 ret = split_vma(mm, vma, start, 1); userfaultfd_register()
846 if (vma->vm_end > end) { userfaultfd_register()
847 ret = split_vma(mm, vma, end, 0); userfaultfd_register()
854 * the next vma was merged into the current one and userfaultfd_register()
857 vma->vm_flags = new_flags; userfaultfd_register()
858 vma->vm_userfaultfd_ctx.ctx = ctx; userfaultfd_register()
861 prev = vma; userfaultfd_register()
862 start = vma->vm_end; userfaultfd_register()
863 vma = vma->vm_next; userfaultfd_register()
864 } while (vma && vma->vm_start < end); userfaultfd_register()
885 struct vm_area_struct *vma, *prev, *cur; userfaultfd_unregister() local
906 vma = find_vma_prev(mm, start, &prev); userfaultfd_unregister()
909 if (!vma) userfaultfd_unregister()
912 /* check that there's at least one vma in the range */ userfaultfd_unregister()
914 if (vma->vm_start >= end) userfaultfd_unregister()
926 for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { userfaultfd_unregister()
946 if (vma->vm_start < start) userfaultfd_unregister()
947 prev = vma; userfaultfd_unregister()
953 BUG_ON(vma->vm_ops); userfaultfd_unregister()
956 * Nothing to do: this vma is already registered into this userfaultfd_unregister()
959 if (!vma->vm_userfaultfd_ctx.ctx) userfaultfd_unregister()
962 if (vma->vm_start > start) userfaultfd_unregister()
963 start = vma->vm_start; userfaultfd_unregister()
964 vma_end = min(end, vma->vm_end); userfaultfd_unregister()
966 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); userfaultfd_unregister()
968 vma->anon_vma, vma->vm_file, vma->vm_pgoff, userfaultfd_unregister()
969 vma_policy(vma), userfaultfd_unregister()
972 vma = prev; userfaultfd_unregister()
975 if (vma->vm_start < start) { userfaultfd_unregister()
976 ret = split_vma(mm, vma, start, 1); userfaultfd_unregister()
980 if (vma->vm_end > end) { userfaultfd_unregister()
981 ret = split_vma(mm, vma, end, 0); userfaultfd_unregister()
988 * the next vma was merged into the current one and userfaultfd_unregister()
991 vma->vm_flags = new_flags; userfaultfd_unregister()
992 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; userfaultfd_unregister()
995 prev = vma; userfaultfd_unregister()
996 start = vma->vm_end; userfaultfd_unregister()
997 vma = vma->vm_next; userfaultfd_unregister()
998 } while (vma && vma->vm_start < end); userfaultfd_unregister()
/linux-4.4.14/drivers/char/
H A Dmspec.c83 * structure is pointed to by the vma->vm_private_data field in the vma struct.
85 * This structure is shared by all vma's that are split off from the
86 * original vma when split_vma()'s are done.
145 mspec_open(struct vm_area_struct *vma) mspec_open() argument
149 vdata = vma->vm_private_data; mspec_open()
157 * belonging to all the vma's sharing this vma_data structure.
160 mspec_close(struct vm_area_struct *vma) mspec_close() argument
166 vdata = vma->vm_private_data; mspec_close()
200 mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mspec_fault() argument
205 struct vma_data *vdata = vma->vm_private_data; mspec_fault()
236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); mspec_fault()
250 * Called when mmapping the device. Initializes the vma with a fault handler
255 mspec_mmap(struct file *file, struct vm_area_struct *vma, mspec_mmap() argument
261 if (vma->vm_pgoff != 0) mspec_mmap()
264 if ((vma->vm_flags & VM_SHARED) == 0) mspec_mmap()
267 if ((vma->vm_flags & VM_WRITE) == 0) mspec_mmap()
270 pages = vma_pages(vma); mspec_mmap()
281 vdata->vm_start = vma->vm_start; mspec_mmap()
282 vdata->vm_end = vma->vm_end; mspec_mmap()
287 vma->vm_private_data = vdata; mspec_mmap()
289 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; mspec_mmap()
291 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mspec_mmap()
292 vma->vm_ops = &mspec_vm_ops; mspec_mmap()
298 fetchop_mmap(struct file *file, struct vm_area_struct *vma) fetchop_mmap() argument
300 return mspec_mmap(file, vma, MSPEC_FETCHOP); fetchop_mmap()
304 cached_mmap(struct file *file, struct vm_area_struct *vma) cached_mmap() argument
306 return mspec_mmap(file, vma, MSPEC_CACHED); cached_mmap()
310 uncached_mmap(struct file *file, struct vm_area_struct *vma) uncached_mmap() argument
312 return mspec_mmap(file, vma, MSPEC_UNCACHED); uncached_mmap()
H A Duv_mmtimer.c43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
142 * @vma: VMA to map the registers into
147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) uv_mmtimer_mmap() argument
151 if (vma->vm_end - vma->vm_start != PAGE_SIZE) uv_mmtimer_mmap()
154 if (vma->vm_flags & VM_WRITE) uv_mmtimer_mmap()
160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uv_mmtimer_mmap()
166 if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, uv_mmtimer_mmap()
167 PAGE_SIZE, vma->vm_page_prot)) { uv_mmtimer_mmap()
/linux-4.4.14/arch/tile/kernel/
H A Dtlb.c53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, flush_tlb_page_mm() argument
56 unsigned long size = vma_kernel_pagesize(vma); flush_tlb_page_mm()
57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; flush_tlb_page_mm()
62 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) flush_tlb_page() argument
64 flush_tlb_page_mm(vma, vma->vm_mm, va); flush_tlb_page()
68 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
71 unsigned long size = vma_kernel_pagesize(vma); flush_tlb_range()
72 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; flush_tlb_range()
/linux-4.4.14/arch/arm/kernel/
H A Dsmp_tlb.c156 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) flush_tlb_page() argument
160 ta.ta_vma = vma; flush_tlb_page()
162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, flush_tlb_page()
165 __flush_tlb_page(vma, uaddr); flush_tlb_page()
166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); flush_tlb_page()
180 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
185 ta.ta_vma = vma; flush_tlb_range()
188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, flush_tlb_range()
191 local_flush_tlb_range(vma, start, end); flush_tlb_range()
192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); flush_tlb_range()
/linux-4.4.14/arch/ia64/mm/
H A Dfault.c83 struct vm_area_struct *vma, *prev_vma; ia64_do_page_fault() local
106 * is no vma for region 5 addr's anyway, so skip getting the semaphore ia64_do_page_fault()
127 vma = find_vma_prev(mm, address, &prev_vma); ia64_do_page_fault()
128 if (!vma && !prev_vma ) ia64_do_page_fault()
132 * find_vma_prev() returns vma such that address < vma->vm_end or NULL ia64_do_page_fault()
134 * May find no vma, but could be that the last vm area is the ia64_do_page_fault()
136 * this case vma will be null, but prev_vma will ne non-null ia64_do_page_fault()
138 if (( !vma && prev_vma ) || (address < vma->vm_start) ) ia64_do_page_fault()
151 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) ia64_do_page_fault()
154 if ((vma->vm_flags & mask) != mask) ia64_do_page_fault()
162 fault = handle_mm_fault(mm, vma, address, flags); ia64_do_page_fault()
207 if (!vma) ia64_do_page_fault()
209 if (!(vma->vm_flags & VM_GROWSDOWN)) ia64_do_page_fault()
211 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) ia64_do_page_fault()
214 if (expand_stack(vma, address)) ia64_do_page_fault()
217 vma = prev_vma; ia64_do_page_fault()
218 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) ia64_do_page_fault()
225 if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) ia64_do_page_fault()
227 if (expand_upwards(vma, address)) ia64_do_page_fault()
273 * Since we have no vma's for region 5, we might get here even if the address is ia64_do_page_fault()
/linux-4.4.14/arch/um/drivers/
H A Dmmapper_kern.c48 static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) mmapper_mmap() argument
53 if (vma->vm_pgoff != 0) mmapper_mmap()
56 size = vma->vm_end - vma->vm_start; mmapper_mmap()
64 if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, mmapper_mmap()
65 vma->vm_page_prot)) mmapper_mmap()
/linux-4.4.14/arch/unicore32/mm/
H A Dflush.c23 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
26 if (vma->vm_flags & VM_EXEC) flush_cache_range()
30 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, flush_cache_page() argument
35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
39 if (vma->vm_flags & VM_EXEC) { flush_ptrace_access()
53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
58 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
/linux-4.4.14/arch/parisc/kernel/
H A Dcache.c78 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) update_mmu_cache() argument
276 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, __flush_cache_page() argument
281 if (vma->vm_flags & VM_EXEC) __flush_cache_page()
491 struct vm_area_struct *vma; mm_total_size() local
494 for (vma = mm->mmap; vma; vma = vma->vm_next) mm_total_size()
495 usize += vma->vm_end - vma->vm_start; mm_total_size()
516 struct vm_area_struct *vma; flush_cache_mm() local
527 for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_cache_mm()
528 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); flush_cache_mm()
529 if ((vma->vm_flags & VM_EXEC) == 0) flush_cache_mm()
531 flush_user_icache_range_asm(vma->vm_start, vma->vm_end); flush_cache_mm()
537 for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_cache_mm()
540 for (addr = vma->vm_start; addr < vma->vm_end; flush_cache_mm()
549 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); flush_cache_mm()
572 void flush_cache_range(struct vm_area_struct *vma, flush_cache_range() argument
578 BUG_ON(!vma->vm_mm->context); flush_cache_range()
585 if (vma->vm_mm->context == mfsp(3)) { flush_cache_range()
587 if (vma->vm_flags & VM_EXEC) flush_cache_range()
592 pgd = vma->vm_mm->pgd; flush_cache_range()
600 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); flush_cache_range()
605 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) flush_cache_page() argument
607 BUG_ON(!vma->vm_mm->context); flush_cache_page()
610 flush_tlb_page(vma, vmaddr); flush_cache_page()
611 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); flush_cache_page()
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_mmap.c64 static void ipath_vma_open(struct vm_area_struct *vma) ipath_vma_open() argument
66 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_open()
71 static void ipath_vma_close(struct vm_area_struct *vma) ipath_vma_close() argument
73 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_close()
86 * @vma: the VMA to be initialized
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ipath_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; ipath_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; ipath_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); ipath_mmap()
118 vma->vm_ops = &ipath_vm_ops; ipath_mmap()
119 vma->vm_private_data = ip; ipath_mmap()
120 ipath_vma_open(vma); ipath_mmap()
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_mmap.c64 static void qib_vma_open(struct vm_area_struct *vma) qib_vma_open() argument
66 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_open()
71 static void qib_vma_close(struct vm_area_struct *vma) qib_vma_close() argument
73 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_close()
86 * @vma: the VMA to be initialized
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) qib_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; qib_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; qib_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); qib_mmap()
118 vma->vm_ops = &qib_vm_ops; qib_mmap()
119 vma->vm_private_data = ip; qib_mmap()
120 qib_vma_open(vma); qib_mmap()
/linux-4.4.14/arch/c6x/include/asm/
H A Dcacheflush.h29 #define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
46 #define flush_icache_page(vma, page) \
48 if ((vma)->vm_flags & PROT_EXEC) \
56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/arch/mips/mm/
H A Dc-octeon.c61 * vma. If no vma is supplied, all cores are flushed.
63 * @vma: VMA to flush or NULL to flush all icaches.
65 static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) octeon_flush_icache_all_cores() argument
80 * If we have a vma structure, we only need to worry about octeon_flush_icache_all_cores()
83 if (vma) octeon_flush_icache_all_cores()
84 mask = *mm_cpumask(vma->vm_mm); octeon_flush_icache_all_cores()
138 struct vm_area_struct *vma; octeon_flush_cache_sigtramp() local
141 vma = find_vma(current->mm, addr); octeon_flush_cache_sigtramp()
142 octeon_flush_icache_all_cores(vma); octeon_flush_cache_sigtramp()
148 * Flush a range out of a vma
150 * @vma: VMA to flush
154 static void octeon_flush_cache_range(struct vm_area_struct *vma, octeon_flush_cache_range() argument
157 if (vma->vm_flags & VM_EXEC) octeon_flush_cache_range()
158 octeon_flush_icache_all_cores(vma); octeon_flush_cache_range()
163 * Flush a specific page of a vma
165 * @vma: VMA to flush page for
169 static void octeon_flush_cache_page(struct vm_area_struct *vma, octeon_flush_cache_page() argument
172 if (vma->vm_flags & VM_EXEC) octeon_flush_cache_page()
173 octeon_flush_icache_all_cores(vma); octeon_flush_cache_page()
H A Dtlb-r3k.c81 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
84 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
160 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
164 if (cpu_context(cpu, vma->vm_mm) != 0) { local_flush_tlb_page()
169 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); local_flush_tlb_page()
171 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; local_flush_tlb_page()
191 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) __update_tlb() argument
199 if (current->active_mm != vma->vm_mm) __update_tlb()
205 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { __update_tlb()
207 (cpu_context(cpu, vma->vm_mm)), pid); __update_tlb()
H A Dtlb-r8k.c61 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
64 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
148 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
155 if (!cpu_context(cpu, vma->vm_mm)) local_flush_tlb_page()
158 newpid = cpu_asid(cpu, vma->vm_mm); local_flush_tlb_page()
183 void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) __update_tlb() argument
194 if (current->active_mm != vma->vm_mm) __update_tlb()
203 pgdp = pgd_offset(vma->vm_mm, address); __update_tlb()
/linux-4.4.14/arch/tile/include/asm/
H A Dtlbflush.h41 /* Pass as vma pointer for non-executable mapping, if no vma available. */
45 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
53 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) local_flush_tlb_page()
58 static inline void local_flush_tlb_pages(struct vm_area_struct *vma, local_flush_tlb_pages() argument
67 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) local_flush_tlb_pages()
102 * - flush_tlb_page(vma, vmaddr) flushes one page
103 * - flush_tlb_range(vma, start, end) flushes a range of pages
H A Dtlb.h18 #define tlb_start_vma(tlb, vma) do { } while (0)
19 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dhugetlb.h63 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
66 ptep_clear_flush(vma, addr, ptep); huge_ptep_clear_flush()
85 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
89 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
102 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, arch_make_huge_pte() argument
105 size_t pagesize = huge_page_size(hstate_vma(vma)); arch_make_huge_pte()
/linux-4.4.14/arch/mn10300/include/asm/
H A Dtlb.h20 * we don't need any special per-pte or per-vma handling...
22 #define tlb_start_vma(tlb, vma) do { } while (0)
23 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h93 * - flush_tlb_page(vma, vmaddr) flushes one page
108 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
111 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
130 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
138 #define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
/linux-4.4.14/arch/openrisc/include/asm/
H A Dtlb.h24 * per-vma handling..
26 #define tlb_start_vma(tlb, vma) do { } while (0)
27 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.4.14/arch/arm64/include/asm/
H A Dfb.h23 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
26 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h53 * flush_tlb_page(vaddr,vma)
57 * - vma - vma_struct describing address range
91 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
94 unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); flush_tlb_page()
107 static inline void __flush_tlb_range(struct vm_area_struct *vma, __flush_tlb_range() argument
111 unsigned long asid = ASID(vma->vm_mm) << 48; __flush_tlb_range()
115 flush_tlb_mm(vma->vm_mm); __flush_tlb_range()
132 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
135 __flush_tlb_range(vma, start, end, false); flush_tlb_range()
/linux-4.4.14/arch/powerpc/kernel/
H A Dproc_powerpc.c44 static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) page_map_mmap()
46 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) page_map_mmap()
49 remap_pfn_range(vma, vma->vm_start, page_map_mmap()
51 PAGE_SIZE, vma->vm_page_prot); page_map_mmap()
/linux-4.4.14/arch/tile/mm/
H A Delf.c45 struct vm_area_struct *vma; notify_exec() local
64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { notify_exec()
65 if (vma == NULL) { notify_exec()
69 if (vma->vm_file == exe_file) notify_exec()
78 if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) { notify_exec()
82 snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start); notify_exec()
/linux-4.4.14/arch/unicore32/include/asm/
H A Dtlb.h15 #define tlb_start_vma(tlb, vma) do { } while (0)
16 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.4.14/arch/sh/mm/
H A Dmmap.c37 struct vm_area_struct *vma; arch_get_unmapped_area() local
64 vma = find_vma(mm, addr); arch_get_unmapped_area()
66 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
84 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
114 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
116 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
H A Dtlbflush_32.c15 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { local_flush_tlb_page()
39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
42 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
H A Dcache.c58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
73 if (vma->vm_flags & VM_EXEC) copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
94 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
112 (vma->vm_flags & VM_EXEC)) copy_user_highpage()
134 void __update_cache(struct vm_area_struct *vma, __update_cache() argument
191 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, flush_cache_page() argument
196 data.vma = vma; flush_cache_page()
203 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
208 data.vma = vma; flush_cache_range()
226 data.vma = NULL; flush_icache_range()
234 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
93 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
/linux-4.4.14/arch/arm64/mm/
H A Dflush.c30 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
33 if (vma->vm_flags & VM_EXEC) flush_cache_range()
37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
41 if (vma->vm_flags & VM_EXEC) { flush_ptrace_access()
59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
65 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
105 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmdp_splitting_flush() argument
111 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
/linux-4.4.14/arch/microblaze/kernel/
H A Ddma.c158 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, dma_direct_mmap_coherent() argument
163 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; dma_direct_mmap_coherent()
165 unsigned long off = vma->vm_pgoff; dma_direct_mmap_coherent()
172 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); dma_direct_mmap_coherent()
177 return remap_pfn_range(vma, vma->vm_start, pfn + off, dma_direct_mmap_coherent()
178 vma->vm_end - vma->vm_start, vma->vm_page_prot); dma_direct_mmap_coherent()
/linux-4.4.14/arch/arm/mm/
H A Dfault-armv.c40 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, do_adjust_pte() argument
56 flush_cache_page(vma, address, pfn); do_adjust_pte()
61 set_pte_at(vma->vm_mm, address, ptep, entry); do_adjust_pte()
62 flush_tlb_page(vma, address); do_adjust_pte()
92 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, adjust_pte() argument
102 pgd = pgd_offset(vma->vm_mm, address); adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); adjust_pte()
123 ret = do_adjust_pte(vma, address, pfn, pte); adjust_pte()
132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, make_coherent() argument
135 struct mm_struct *mm = vma->vm_mm; make_coherent()
141 pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); make_coherent()
155 if (mpnt->vm_mm != mm || mpnt == vma) make_coherent()
164 do_adjust_pte(vma, addr, pfn, ptep); make_coherent()
180 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache() argument
203 make_coherent(mapping, vma, addr, ptep, pfn); update_mmu_cache()
204 else if (vma->vm_flags & VM_EXEC) update_mmu_cache()
H A Dmmap.c59 struct vm_area_struct *vma; arch_get_unmapped_area() local
90 vma = find_vma(mm, addr); arch_get_unmapped_area()
92 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
110 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
141 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
143 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
H A Dchangf100.h19 struct nvkm_vma vma; member in struct:gf100_fifo_chan::__anon4480
/linux-4.4.14/drivers/gpu/drm/msm/
H A Dmsm_gem_prime.c44 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) msm_gem_prime_mmap() argument
48 ret = drm_gem_mmap_obj(obj, obj->size, vma); msm_gem_prime_mmap()
52 return msm_gem_mmap_obj(vma->vm_private_data, vma); msm_gem_prime_mmap()
/linux-4.4.14/arch/arc/mm/
H A Dmmap.c35 struct vm_area_struct *vma; arch_get_unmapped_area() local
65 vma = find_vma(mm, addr); arch_get_unmapped_area()
67 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
/linux-4.4.14/arch/sh/kernel/vsyscall/
H A Dvsyscall.c88 const char *arch_vma_name(struct vm_area_struct *vma) arch_vma_name() argument
90 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) arch_vma_name()
/linux-4.4.14/include/xen/
H A Dxen-ops.h34 * @vma: VMA to map the pages into
49 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
57 * @vma: VMA to map the pages into
68 int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
73 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
75 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
81 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
/linux-4.4.14/ipc/
H A Dshm.c68 static void shm_open(struct vm_area_struct *vma);
69 static void shm_close(struct vm_area_struct *vma);
190 static int __shm_open(struct vm_area_struct *vma) __shm_open() argument
192 struct file *file = vma->vm_file; __shm_open()
209 static void shm_open(struct vm_area_struct *vma) shm_open() argument
211 int err = __shm_open(vma); shm_open()
264 * remove the attach descriptor vma.
269 static void shm_close(struct vm_area_struct *vma) shm_close() argument
271 struct file *file = vma->vm_file; shm_close()
376 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) shm_fault() argument
378 struct file *file = vma->vm_file; shm_fault()
381 return sfd->vm_ops->fault(vma, vmf); shm_fault()
385 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) shm_set_policy() argument
387 struct file *file = vma->vm_file; shm_set_policy()
391 err = sfd->vm_ops->set_policy(vma, new); shm_set_policy()
395 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, shm_get_policy() argument
398 struct file *file = vma->vm_file; shm_get_policy()
403 pol = sfd->vm_ops->get_policy(vma, addr); shm_get_policy()
404 else if (vma->vm_policy) shm_get_policy()
405 pol = vma->vm_policy; shm_get_policy()
411 static int shm_mmap(struct file *file, struct vm_area_struct *vma) shm_mmap() argument
420 ret =__shm_open(vma); shm_mmap()
424 ret = sfd->file->f_op->mmap(sfd->file, vma); shm_mmap()
426 shm_close(vma); shm_mmap()
429 sfd->vm_ops = vma->vm_ops; shm_mmap()
433 vma->vm_ops = &shm_vm_ops; shm_mmap()
1262 struct vm_area_struct *vma; SYSCALL_DEFINE1() local
1280 * unmapped: It searches for a vma that is backed by shm and that SYSCALL_DEFINE1()
1288 * a part of a vma. Both calls in this function are for full vmas, SYSCALL_DEFINE1()
1289 * the parameters are directly copied from the vma itself and always SYSCALL_DEFINE1()
1294 * match the usual checks anyway. So assume all vma's are SYSCALL_DEFINE1()
1297 vma = find_vma(mm, addr); SYSCALL_DEFINE1()
1300 while (vma) { SYSCALL_DEFINE1()
1301 next = vma->vm_next; SYSCALL_DEFINE1()
1308 if ((vma->vm_ops == &shm_vm_ops) && SYSCALL_DEFINE1()
1309 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { SYSCALL_DEFINE1()
1317 file = vma->vm_file; SYSCALL_DEFINE1()
1318 size = i_size_read(file_inode(vma->vm_file)); SYSCALL_DEFINE1()
1319 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
1324 * searching for matching vma's. SYSCALL_DEFINE1()
1327 vma = next; SYSCALL_DEFINE1()
1330 vma = next; SYSCALL_DEFINE1()
1339 while (vma && (loff_t)(vma->vm_end - addr) <= size) { SYSCALL_DEFINE1()
1340 next = vma->vm_next; SYSCALL_DEFINE1()
1342 /* finding a matching vma now does not alter retval */ SYSCALL_DEFINE1()
1343 if ((vma->vm_ops == &shm_vm_ops) && SYSCALL_DEFINE1()
1344 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && SYSCALL_DEFINE1()
1345 (vma->vm_file == file)) SYSCALL_DEFINE1()
1346 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
1347 vma = next; SYSCALL_DEFINE1()
1353 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { SYSCALL_DEFINE1()
1354 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
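
The shmdt() path above (SYSCALL_DEFINE1) walks the task's vma list with vm_next and picks out the mappings whose vm_ops identify them as shm attaches. The walk itself is a generic idiom; here is a hedged sketch that merely counts vmas with a given vm_ops (the helper is illustrative, not part of ipc/shm.c).

/* Sketch of the linear vma-list walk used by shmdt() above.
 * The caller is assumed to hold mm->mmap_sem. */
#include <linux/mm.h>

static unsigned long count_vmas_with_ops(struct mm_struct *mm,
					 const struct vm_operations_struct *ops)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_ops == ops)
			n++;
	return n;
}
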
/linux-4.4.14/drivers/xen/xenbus/
H A Dxenbus_dev_backend.c93 static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) xenbus_backend_mmap() argument
95 size_t size = vma->vm_end - vma->vm_start; xenbus_backend_mmap()
100 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) xenbus_backend_mmap()
103 if (remap_pfn_range(vma, vma->vm_start, xenbus_backend_mmap()
105 size, vma->vm_page_prot)) xenbus_backend_mmap()
/linux-4.4.14/drivers/misc/mic/host/
H A Dmic_fops.c192 mic_mmap(struct file *f, struct vm_area_struct *vma) mic_mmap() argument
195 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; mic_mmap()
196 unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; mic_mmap()
203 if (vma->vm_flags & VM_WRITE) mic_mmap()
210 err = remap_pfn_range(vma, vma->vm_start + offset, mic_mmap()
211 pa >> PAGE_SHIFT, size, vma->vm_page_prot); mic_mmap()
215 "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", mic_mmap()
217 pa, vma->vm_start + offset); mic_mmap()
/linux-4.4.14/arch/m68k/mm/
H A Dfault.c73 struct vm_area_struct * vma; do_page_fault() local
92 vma = find_vma(mm, address); do_page_fault()
93 if (!vma) do_page_fault()
95 if (vma->vm_flags & VM_IO) do_page_fault()
97 if (vma->vm_start <= address) do_page_fault()
99 if (!(vma->vm_flags & VM_GROWSDOWN)) do_page_fault()
109 if (expand_stack(vma, address)) do_page_fault()
122 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
129 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) do_page_fault()
139 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
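
The m68k handler above follows the canonical fault flow: look the address up with find_vma(), let a VM_GROWSDOWN vma be expanded to cover it, check the access type against vm_flags, then let handle_mm_fault() do the real work. A condensed, architecture-neutral sketch of that flow follows, using the 4.4-era handle_mm_fault() signature; the function name is illustrative and error handling is reduced to a single SIGSEGV-style return.

/* Hedged skeleton of the fault path shown above; not any real arch's handler. */
#include <linux/mm.h>
#include <linux/sched.h>

static int sketch_page_fault(struct mm_struct *mm, unsigned long address,
			     bool is_write)
{
	struct vm_area_struct *vma;
	unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;
	int fault;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > address) {
		/* only a stack vma may grow down to cover the faulting address */
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}
	if (is_write ? !(vma->vm_flags & VM_WRITE)
		     : !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto bad_area;

	fault = handle_mm_fault(mm, vma, address, flags);
	up_read(&mm->mmap_sem);
	return fault;

bad_area:
	up_read(&mm->mmap_sem);
	return VM_FAULT_SIGSEGV;
}
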
/linux-4.4.14/arch/hexagon/mm/
H A Dvm_fault.c50 struct vm_area_struct *vma; do_page_fault() local
71 vma = find_vma(mm, address); do_page_fault()
72 if (!vma) do_page_fault()
75 if (vma->vm_start <= address) do_page_fault()
78 if (!(vma->vm_flags & VM_GROWSDOWN)) do_page_fault()
81 if (expand_stack(vma, address)) do_page_fault()
90 if (!(vma->vm_flags & VM_EXEC)) do_page_fault()
94 if (!(vma->vm_flags & VM_READ)) do_page_fault()
98 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
104 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
H A Dvm_tlb.c37 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, flush_tlb_range() argument
40 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
76 * Flush TLB state associated with a page of a vma.
78 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr) flush_tlb_page() argument
80 struct mm_struct *mm = vma->vm_mm; flush_tlb_page()
88 * Like flush range, but without the check on the vma->vm_mm.
/linux-4.4.14/fs/ncpfs/
H A Dmmap.c104 int ncp_mmap(struct file *file, struct vm_area_struct *vma) ncp_mmap() argument
114 if (vma->vm_flags & VM_SHARED) ncp_mmap()
118 if (vma_pages(vma) + vma->vm_pgoff ncp_mmap()
122 vma->vm_ops = &ncp_file_mmap; ncp_mmap()
/linux-4.4.14/arch/s390/include/asm/
H A Dhugetlb.h47 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
50 huge_ptep_get_and_clear(vma->vm_mm, address, ptep); huge_ptep_clear_flush()
53 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
59 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); huge_ptep_set_access_flags()
60 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
/linux-4.4.14/arch/s390/pci/
H A Dpci_mmio.c17 struct vm_area_struct *vma; get_pfn() local
22 vma = find_vma(current->mm, user_addr); get_pfn()
23 if (!vma) get_pfn()
26 if (!(vma->vm_flags & access)) get_pfn()
28 ret = follow_pfn(vma, user_addr, pfn); get_pfn()
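
get_pfn() above resolves a user virtual address to a page frame number by locating the vma, checking its access flags, and calling follow_pfn(). A hedged restatement with an illustrative wrapper name:

/* Sketch of the lookup in get_pfn() above.  follow_pfn() only succeeds on
 * VM_IO/VM_PFNMAP mappings; the caller must hold current->mm->mmap_sem. */
#include <linux/mm.h>
#include <linux/sched.h>

static int user_addr_to_pfn(unsigned long user_addr, vm_flags_t access,
			    unsigned long *pfn)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, user_addr);
	if (!vma || user_addr < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & access))
		return -EACCES;

	return follow_pfn(vma, user_addr, pfn);
}
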
/linux-4.4.14/arch/sh/kernel/
H A Dsys_sh.c59 struct vm_area_struct *vma; sys_cacheflush() local
72 vma = find_vma (current->mm, addr); sys_cacheflush()
73 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) { sys_cacheflush()
H A Dsmp.c376 struct vm_area_struct *vma; member in struct:flush_tlb_data
385 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); flush_tlb_range_ipi()
388 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
391 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
397 fd.vma = vma; flush_tlb_range()
407 local_flush_tlb_range(vma, start, end); flush_tlb_range()
431 local_flush_tlb_page(fd->vma, fd->addr1); flush_tlb_page_ipi()
434 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) flush_tlb_page() argument
437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || flush_tlb_page()
438 (current->mm != vma->vm_mm)) { flush_tlb_page()
441 fd.vma = vma; flush_tlb_page()
448 cpu_context(i, vma->vm_mm) = 0; flush_tlb_page()
450 local_flush_tlb_page(vma, page); flush_tlb_page()
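
flush_tlb_page() above packages the vma and address into a small on-stack struct and pushes the flush to the other CPUs before flushing locally. Below is a stripped-down sketch of that idiom using on_each_cpu() rather than the finer-grained smp_call_function() logic in the fragment; local_flush_tlb_page() is the per-arch primitive seen above, and the other names are illustrative.

/* Hedged sketch of the cross-CPU TLB-flush idiom from the smp.c fragment above. */
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

struct flush_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

static void do_flush_one_page(void *info)
{
	struct flush_args *fa = info;

	local_flush_tlb_page(fa->vma, fa->addr);
}

static void sketch_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	struct flush_args fa = { .vma = vma, .addr = addr };

	/* run the flush on every CPU and wait for completion */
	on_each_cpu(do_flush_one_page, &fa, 1);
}
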
/linux-4.4.14/arch/parisc/mm/
H A Dfault.c120 /* This is the treewalk to find a vma which is the highest that has
182 struct vm_area_struct *vma) show_signal_msg()
194 if (vma) show_signal_msg()
196 vma->vm_start, vma->vm_end); show_signal_msg()
204 struct vm_area_struct *vma, *prev_vma; do_page_fault() local
228 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault()
229 if (!vma || address < vma->vm_start) do_page_fault()
238 if ((vma->vm_flags & acc_type) != acc_type) do_page_fault()
247 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
287 vma = prev_vma; do_page_fault()
288 if (vma && (expand_stack(vma, address) == 0)) do_page_fault()
300 show_signal_msg(regs, code, address, tsk, vma); do_page_fault()
304 /* send SIGSEGV when outside of vma */ do_page_fault()
305 if (!vma || do_page_fault()
306 address < vma->vm_start || address > vma->vm_end) { do_page_fault()
313 if ((vma->vm_flags & acc_type) != acc_type) { do_page_fault()
180 show_signal_msg(struct pt_regs *regs, unsigned long code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma) show_signal_msg() argument
/linux-4.4.14/drivers/misc/sgi-gru/
H A Dgrufile.c71 * and tables belonging to the vma.
73 static void gru_vma_close(struct vm_area_struct *vma) gru_vma_close() argument
79 if (!vma->vm_private_data) gru_vma_close()
82 vdata = vma->vm_private_data; gru_vma_close()
83 vma->vm_private_data = NULL; gru_vma_close()
84 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, gru_vma_close()
103 * Called when mmapping the device. Initializes the vma with a fault handler
107 static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) gru_file_mmap() argument
109 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) gru_file_mmap()
112 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || gru_file_mmap()
113 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) gru_file_mmap()
116 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | gru_file_mmap()
118 vma->vm_page_prot = PAGE_SHARED; gru_file_mmap()
119 vma->vm_ops = &gru_vm_ops; gru_file_mmap()
121 vma->vm_private_data = gru_alloc_vma_data(vma, 0); gru_file_mmap()
122 if (!vma->vm_private_data) gru_file_mmap()
125 gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n", gru_file_mmap()
126 file, vma->vm_start, vma, vma->vm_private_data); gru_file_mmap()
136 struct vm_area_struct *vma; gru_create_new_context() local
152 vma = gru_find_vma(req.gseg); gru_create_new_context()
153 if (vma) { gru_create_new_context()
154 vdata = vma->vm_private_data; gru_create_new_context()
/linux-4.4.14/arch/powerpc/oprofile/cell/
H A Dvma_map.c15 * vma-to-fileOffset maps for both overlay and non-overlay SPU
37 vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma, vma_map_lookup() argument
42 * Addresses of dynamically generated code can't be found in the vma vma_map_lookup()
47 u32 offset = 0x10000000 + vma; vma_map_lookup()
51 if (vma < map->vma || vma >= map->vma + map->size) vma_map_lookup()
60 offset = vma - map->vma + map->offset; vma_map_lookup()
68 vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma, vma_map_add() argument
82 new->vma = vma; vma_map_add()
249 /* The ovly.vma/size/offset arguments are analogous to the same create_vma_map()
272 map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset, create_vma_map()
/linux-4.4.14/arch/nios2/mm/
H A Dcacheflush.c132 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
136 if (vma == NULL || (vma->vm_flags & VM_EXEC)) flush_cache_range()
140 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
149 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, flush_cache_page() argument
156 if (vma->vm_flags & VM_EXEC) flush_cache_page()
200 void update_mmu_cache(struct vm_area_struct *vma, update_mmu_cache() argument
225 if (vma->vm_flags & VM_EXEC) update_mmu_cache()
226 flush_icache_page(vma, page); update_mmu_cache()
249 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
253 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_from_user_page()
256 if (vma->vm_flags & VM_EXEC) copy_from_user_page()
260 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
264 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_to_user_page()
267 if (vma->vm_flags & VM_EXEC) copy_to_user_page()
/linux-4.4.14/kernel/events/
H A Duprobes.c106 * We keep the vma's vm_start rather than a pointer to the vma
108 * the vma go away, and we must handle that reasonably gracefully.
114 * valid_vma: Verify if the specified vma is an executable vma
119 * executable vma.
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register) valid_vma() argument
128 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; valid_vma()
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) offset_to_vaddr() argument
133 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); offset_to_vaddr()
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) vaddr_to_offset() argument
138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); vaddr_to_offset()
142 * __replace_page - replace page in vma by new page.
145 * @vma: vma that holds the pte pointing to page
152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, __replace_page() argument
155 struct mm_struct *mm = vma->vm_mm; __replace_page()
164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); __replace_page()
178 page_add_new_anon_rmap(kpage, vma, addr); __replace_page()
180 lru_cache_add_active_or_unevictable(kpage, vma); __replace_page()
187 flush_cache_page(vma, addr, pte_pfn(*ptep)); __replace_page()
188 ptep_clear_flush_notify(vma, addr, ptep); __replace_page()
189 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); __replace_page()
196 if (vma->vm_flags & VM_LOCKED) __replace_page()
296 struct vm_area_struct *vma; uprobe_write_opcode() local
301 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); uprobe_write_opcode()
309 ret = anon_vma_prepare(vma); uprobe_write_opcode()
314 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); uprobe_write_opcode()
322 ret = __replace_page(vma, vaddr, old_page, new_page); uprobe_write_opcode()
642 struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint()
647 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); install_breakpoint()
714 struct vm_area_struct *vma; build_map_info() local
722 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { build_map_info()
723 if (!valid_vma(vma, is_register)) build_map_info()
741 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) build_map_info()
749 info->mm = vma->vm_mm; build_map_info()
750 info->vaddr = offset_to_vaddr(vma, offset); build_map_info()
797 struct vm_area_struct *vma; register_for_each_vma() local
803 vma = find_vma(mm, info->vaddr); register_for_each_vma()
804 if (!vma || !valid_vma(vma, is_register) || register_for_each_vma()
805 file_inode(vma->vm_file) != uprobe->inode) register_for_each_vma()
808 if (vma->vm_start > info->vaddr || register_for_each_vma()
809 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) register_for_each_vma()
816 err = install_breakpoint(uprobe, mm, vma, info->vaddr); register_for_each_vma()
962 struct vm_area_struct *vma; unapply_uprobe() local
966 for (vma = mm->mmap; vma; vma = vma->vm_next) { unapply_uprobe()
970 if (!valid_vma(vma, false) || unapply_uprobe()
971 file_inode(vma->vm_file) != uprobe->inode) unapply_uprobe()
974 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; unapply_uprobe()
976 uprobe->offset >= offset + vma->vm_end - vma->vm_start) unapply_uprobe()
979 vaddr = offset_to_vaddr(vma, uprobe->offset); unapply_uprobe()
1013 * For a given range in vma, build a list of probes that need to be inserted.
1016 struct vm_area_struct *vma, build_probe_list()
1025 min = vaddr_to_offset(vma, start); build_probe_list()
1055 int uprobe_mmap(struct vm_area_struct *vma) uprobe_mmap() argument
1061 if (no_uprobe_events() || !valid_vma(vma, true)) uprobe_mmap()
1064 inode = file_inode(vma->vm_file); uprobe_mmap()
1069 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); uprobe_mmap()
1077 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { uprobe_mmap()
1078 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); uprobe_mmap()
1079 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); uprobe_mmap()
1089 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) vma_has_uprobes() argument
1095 inode = file_inode(vma->vm_file); vma_has_uprobes()
1097 min = vaddr_to_offset(vma, start); vma_has_uprobes()
1108 * Called in context of a munmap of a vma.
1110 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) uprobe_munmap() argument
1112 if (no_uprobe_events() || !valid_vma(vma, false)) uprobe_munmap()
1115 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ uprobe_munmap()
1118 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || uprobe_munmap()
1119 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) uprobe_munmap()
1122 if (vma_has_uprobes(vma, start, end)) uprobe_munmap()
1123 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); uprobe_munmap()
1129 struct vm_area_struct *vma; xol_add_vma() local
1148 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, xol_add_vma()
1151 if (IS_ERR(vma)) { xol_add_vma()
1152 ret = PTR_ERR(vma); xol_add_vma()
1355 * We probably need flush_icache_user_range() but it needs vma. arch_uprobe_copy_ixol()
1670 struct vm_area_struct *vma; mmf_recalc_uprobes() local
1672 for (vma = mm->mmap; vma; vma = vma->vm_next) { mmf_recalc_uprobes()
1673 if (!valid_vma(vma, false)) mmf_recalc_uprobes()
1681 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) mmf_recalc_uprobes()
1717 struct vm_area_struct *vma; find_active_uprobe() local
1720 vma = find_vma(mm, bp_vaddr); find_active_uprobe()
1721 if (vma && vma->vm_start <= bp_vaddr) { find_active_uprobe()
1722 if (valid_vma(vma, false)) { find_active_uprobe()
1723 struct inode *inode = file_inode(vma->vm_file); find_active_uprobe()
1724 loff_t offset = vaddr_to_offset(vma, bp_vaddr); find_active_uprobe()
1877 * we can simply restart. If this vma was unmapped we handle_swbp()
641 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint() argument
1015 build_probe_list(struct inode *inode, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *head) build_probe_list() argument
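
offset_to_vaddr()/vaddr_to_offset() above convert between a file offset and a user address for a file-backed vma, and unapply_uprobe() uses the same arithmetic to test whether a probe's file offset is mapped by a given vma at all. That containment test, written out as a hedged stand-alone helper with an illustrative name:

/* Sketch: does file offset 'offset' fall inside this file-backed vma?
 * Mirrors the range test in unapply_uprobe() above. */
#include <linux/mm.h>

static bool file_offset_in_vma(struct vm_area_struct *vma, loff_t offset)
{
	loff_t start = (loff_t)vma->vm_pgoff << PAGE_SHIFT;

	return offset >= start &&
	       offset < start + (vma->vm_end - vma->vm_start);
}
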
/linux-4.4.14/arch/powerpc/mm/
H A Dcopro_fault.c39 struct vm_area_struct *vma; copro_handle_mm_fault() local
51 vma = find_vma(mm, ea); copro_handle_mm_fault()
52 if (!vma) copro_handle_mm_fault()
55 if (ea < vma->vm_start) { copro_handle_mm_fault()
56 if (!(vma->vm_flags & VM_GROWSDOWN)) copro_handle_mm_fault()
58 if (expand_stack(vma, ea)) copro_handle_mm_fault()
64 if (!(vma->vm_flags & VM_WRITE)) copro_handle_mm_fault()
67 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) copro_handle_mm_fault()
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); copro_handle_mm_fault()
H A Dsubpage-prot.c137 struct vm_area_struct *vma = walk->vma; subpage_walk_pmd_entry() local
138 split_huge_page_pmd(vma, addr, pmd); subpage_walk_pmd_entry()
145 struct vm_area_struct *vma; subpage_mark_vma_nohuge() local
152 * We don't try too hard, we just mark all the vma in that range subpage_mark_vma_nohuge()
155 vma = find_vma(mm, addr); subpage_mark_vma_nohuge()
159 if (vma && ((addr + len) <= vma->vm_start)) subpage_mark_vma_nohuge()
162 while (vma) { subpage_mark_vma_nohuge()
163 if (vma->vm_start >= (addr + len)) subpage_mark_vma_nohuge()
165 vma->vm_flags |= VM_NOHUGEPAGE; subpage_mark_vma_nohuge()
166 walk_page_vma(vma, &subpage_proto_walk); subpage_mark_vma_nohuge()
167 vma = vma->vm_next; subpage_mark_vma_nohuge()
H A Dhugetlbpage-book3e.c78 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, book3e_hugetlb_preload() argument
94 mm = vma->vm_mm; book3e_hugetlb_preload()
101 psize = vma_mmu_pagesize(vma); book3e_hugetlb_preload()
147 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) flush_hugetlb_page() argument
149 struct hstate *hstate = hstate_file(vma->vm_file); flush_hugetlb_page()
152 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); flush_hugetlb_page()
/linux-4.4.14/arch/mips/kernel/
H A Dvdso.c103 struct vm_area_struct *vma; arch_setup_additional_pages() local
130 vma = _install_special_mapping(mm, base, vvar_size, arch_setup_additional_pages()
133 if (IS_ERR(vma)) { arch_setup_additional_pages()
134 ret = PTR_ERR(vma); arch_setup_additional_pages()
144 ret = io_remap_pfn_range(vma, base, arch_setup_additional_pages()
153 ret = remap_pfn_range(vma, data_addr, arch_setup_additional_pages()
160 vma = _install_special_mapping(mm, vdso_addr, image->size, arch_setup_additional_pages()
164 if (IS_ERR(vma)) { arch_setup_additional_pages()
165 ret = PTR_ERR(vma); arch_setup_additional_pages()
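
The mips vdso setup above reserves address space and then installs the vdso with _install_special_mapping(), which builds a vma backed by a fixed set of pages. A minimal sketch of that call follows, assuming a single pre-allocated page; the page, the mapping name and the wrapper function are assumptions for illustration, not the mips code.

/* Hedged sketch of installing a one-page special mapping.  The caller is
 * assumed to hold mm->mmap_sem for writing, as the vdso setup paths do. */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static struct page *sketch_page;	/* assumed allocated elsewhere */
static struct page *sketch_pages[2];	/* NULL-terminated page array */

static const struct vm_special_mapping sketch_spec = {
	.name	= "[sketch]",
	.pages	= sketch_pages,
};

static int install_sketch_mapping(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	sketch_pages[0] = sketch_page;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &sketch_spec);
	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
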
/linux-4.4.14/arch/hexagon/include/asm/
H A Dcacheflush.h31 * - flush_cache_range(vma, start, end) flushes a range of pages
34 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
44 #define flush_cache_range(vma, start, end) do { } while (0)
45 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
50 #define flush_icache_page(vma, pg) do { } while (0)
51 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
86 static inline void update_mmu_cache(struct vm_area_struct *vma, update_mmu_cache() argument
92 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
95 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.4.14/sound/soc/pxa/
H A Dmmp-pcm.c123 struct vm_area_struct *vma) mmp_pcm_mmap()
126 unsigned long off = vma->vm_pgoff; mmp_pcm_mmap()
128 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mmp_pcm_mmap()
129 return remap_pfn_range(vma, vma->vm_start, mmp_pcm_mmap()
131 vma->vm_end - vma->vm_start, vma->vm_page_prot); mmp_pcm_mmap()
122 mmp_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) mmp_pcm_mmap() argument
/linux-4.4.14/drivers/misc/cxl/
H A Dcontext.c103 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) cxl_mmap_fault() argument
105 struct cxl_context *ctx = vma->vm_file->private_data; cxl_mmap_fault()
138 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); cxl_mmap_fault()
144 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); cxl_mmap_fault()
156 * Map a per-context mmio space into the given vma.
158 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) cxl_context_iomap() argument
160 u64 start = vma->vm_pgoff << PAGE_SHIFT; cxl_context_iomap()
161 u64 len = vma->vm_end - vma->vm_start; cxl_context_iomap()
186 vma->vm_flags |= VM_IO | VM_PFNMAP; cxl_context_iomap()
187 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); cxl_context_iomap()
188 vma->vm_ops = &cxl_mmap_vmops; cxl_context_iomap()
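
cxl_mmap_fault() above resolves the faulting offset to an MMIO address and inserts the pfn directly with vm_insert_pfn(), returning VM_FAULT_NOPAGE so the core MM does not expect a struct page. A hedged sketch of that shape, using the 4.4 fault signature and vmf->virtual_address; backing_phys and the other names are illustrative.

/* Sketch of an on-demand pfn-inserting fault handler, after cxl_mmap_fault().
 * Assumes the vma was marked VM_IO | VM_PFNMAP at mmap time. */
#include <linux/mm.h>

static phys_addr_t backing_phys;	/* assumed filled in at mmap time */

static int sketch_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long offset = address - vma->vm_start;

	vm_insert_pfn(vma, address, (backing_phys + offset) >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct sketch_pfn_vm_ops = {
	.fault = sketch_pfn_fault,
};
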
/linux-4.4.14/arch/sparc/mm/
H A Dfault_32.c170 struct vm_area_struct *vma; do_sparc_fault() local
210 vma = find_vma(mm, address); do_sparc_fault()
211 if (!vma) do_sparc_fault()
213 if (vma->vm_start <= address) do_sparc_fault()
215 if (!(vma->vm_flags & VM_GROWSDOWN)) do_sparc_fault()
217 if (expand_stack(vma, address)) do_sparc_fault()
226 if (!(vma->vm_flags & VM_WRITE)) do_sparc_fault()
230 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) do_sparc_fault()
244 fault = handle_mm_fault(mm, vma, address, flags); do_sparc_fault()
386 struct vm_area_struct *vma; force_user_fault() local
395 vma = find_vma(mm, address); force_user_fault()
396 if (!vma) force_user_fault()
398 if (vma->vm_start <= address) force_user_fault()
400 if (!(vma->vm_flags & VM_GROWSDOWN)) force_user_fault()
402 if (expand_stack(vma, address)) force_user_fault()
407 if (!(vma->vm_flags & VM_WRITE)) force_user_fault()
411 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) force_user_fault()
414 switch (handle_mm_fault(mm, vma, address, flags)) { force_user_fault()
/linux-4.4.14/drivers/uio/
H A Duio.c580 static int uio_find_mem_index(struct vm_area_struct *vma) uio_find_mem_index() argument
582 struct uio_device *idev = vma->vm_private_data; uio_find_mem_index()
584 if (vma->vm_pgoff < MAX_UIO_MAPS) { uio_find_mem_index()
585 if (idev->info->mem[vma->vm_pgoff].size == 0) uio_find_mem_index()
587 return (int)vma->vm_pgoff; uio_find_mem_index()
592 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) uio_vma_fault() argument
594 struct uio_device *idev = vma->vm_private_data; uio_vma_fault()
599 int mi = uio_find_mem_index(vma); uio_vma_fault()
623 static int uio_mmap_logical(struct vm_area_struct *vma) uio_mmap_logical() argument
625 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; uio_mmap_logical()
626 vma->vm_ops = &uio_logical_vm_ops; uio_mmap_logical()
636 static int uio_mmap_physical(struct vm_area_struct *vma) uio_mmap_physical() argument
638 struct uio_device *idev = vma->vm_private_data; uio_mmap_physical()
639 int mi = uio_find_mem_index(vma); uio_mmap_physical()
647 if (vma->vm_end - vma->vm_start > mem->size) uio_mmap_physical()
650 vma->vm_ops = &uio_physical_vm_ops; uio_mmap_physical()
651 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uio_mmap_physical()
655 * because vma->vm_pgoff is the map index we looked uio_mmap_physical()
662 return remap_pfn_range(vma, uio_mmap_physical()
663 vma->vm_start, uio_mmap_physical()
665 vma->vm_end - vma->vm_start, uio_mmap_physical()
666 vma->vm_page_prot); uio_mmap_physical()
669 static int uio_mmap(struct file *filep, struct vm_area_struct *vma) uio_mmap() argument
677 if (vma->vm_end < vma->vm_start) uio_mmap()
680 vma->vm_private_data = idev; uio_mmap()
682 mi = uio_find_mem_index(vma); uio_mmap()
686 requested_pages = vma_pages(vma); uio_mmap()
693 ret = idev->info->mmap(idev->info, vma); uio_mmap()
699 return uio_mmap_physical(vma); uio_mmap()
702 return uio_mmap_logical(vma); uio_mmap()
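
uio_mmap_physical() above bounds-checks the request, marks the vma non-cached and then maps the region's physical address with remap_pfn_range(). The same three steps as a hedged stand-alone helper; phys and size stand in for the uio memory region, and the names are illustrative.

/* Sketch of the uncached MMIO mapping pattern from uio_mmap_physical(). */
#include <linux/mm.h>

static int sketch_mmio_mmap(struct vm_area_struct *vma, phys_addr_t phys,
			    resource_size_t size)
{
	if (vma->vm_end - vma->vm_start > size)
		return -EINVAL;

	/* device registers must bypass the CPU cache */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
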
/linux-4.4.14/arch/xtensa/kernel/
H A Dsmp.c442 struct vm_area_struct *vma; member in struct:flush_data
470 local_flush_tlb_page(fd->vma, fd->addr1); ipi_flush_tlb_page()
473 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
476 .vma = vma, flush_tlb_page()
485 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); ipi_flush_tlb_range()
488 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
492 .vma = vma, flush_tlb_range()
529 local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); ipi_flush_cache_page()
532 void flush_cache_page(struct vm_area_struct *vma, flush_cache_page() argument
536 .vma = vma, flush_cache_page()
546 local_flush_cache_range(fd->vma, fd->addr1, fd->addr2); ipi_flush_cache_range()
549 void flush_cache_range(struct vm_area_struct *vma, flush_cache_range() argument
553 .vma = vma, flush_cache_range()
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dmmap.c82 static void hfi1_vma_open(struct vm_area_struct *vma) hfi1_vma_open() argument
84 struct hfi1_mmap_info *ip = vma->vm_private_data; hfi1_vma_open()
89 static void hfi1_vma_close(struct vm_area_struct *vma) hfi1_vma_close() argument
91 struct hfi1_mmap_info *ip = vma->vm_private_data; hfi1_vma_close()
104 * @vma: the VMA to be initialized
107 int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) hfi1_mmap() argument
110 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; hfi1_mmap()
111 unsigned long size = vma->vm_end - vma->vm_start; hfi1_mmap()
133 ret = remap_vmalloc_range(vma, ip->obj, 0); hfi1_mmap()
136 vma->vm_ops = &hfi1_vm_ops; hfi1_mmap()
137 vma->vm_private_data = ip; hfi1_mmap()
138 hfi1_vma_open(vma); hfi1_mmap()
/linux-4.4.14/arch/x86/mm/
H A Dhugetlbpage.c27 struct vm_area_struct *vma;
29 vma = find_vma(mm, addr);
30 if (!vma || !is_vm_hugetlb_page(vma))
130 struct vm_area_struct *vma; hugetlb_get_unmapped_area() local
145 vma = find_vma(mm, addr); hugetlb_get_unmapped_area()
147 (!vma || addr + len <= vma->vm_start)) hugetlb_get_unmapped_area()
/linux-4.4.14/arch/score/mm/
H A Dtlb-score.c80 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
83 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
159 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
161 if (vma && vma->vm_mm->context != 0) { local_flush_tlb_page()
164 unsigned long vma_ASID = vma->vm_mm->context; local_flush_tlb_page()
213 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) __update_tlb() argument
221 if (current->active_mm != vma->vm_mm) __update_tlb()
/linux-4.4.14/arch/ia64/pci/
H A Dpci.c422 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, pci_mmap_page_range() argument
425 unsigned long size = vma->vm_end - vma->vm_start; pci_mmap_page_range()
441 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) pci_mmap_page_range()
444 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, pci_mmap_page_range()
445 vma->vm_page_prot); pci_mmap_page_range()
455 efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) pci_mmap_page_range()
456 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); pci_mmap_page_range()
458 vma->vm_page_prot = prot; pci_mmap_page_range()
460 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_page_range()
461 vma->vm_end - vma->vm_start, vma->vm_page_prot)) pci_mmap_page_range()
487 * @vma: vma passed in by mmap
493 pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, pci_mmap_legacy_page_range() argument
496 unsigned long size = vma->vm_end - vma->vm_start; pci_mmap_legacy_page_range()
508 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) pci_mmap_legacy_page_range()
510 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, pci_mmap_legacy_page_range()
511 vma->vm_page_prot); pci_mmap_legacy_page_range()
517 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; pci_mmap_legacy_page_range()
518 vma->vm_page_prot = prot; pci_mmap_legacy_page_range()
520 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_legacy_page_range()
521 size, vma->vm_page_prot)) pci_mmap_legacy_page_range()
/linux-4.4.14/drivers/vfio/platform/
H A Dvfio_platform_common.c478 struct vm_area_struct *vma) vfio_platform_mmap_mmio()
482 req_len = vma->vm_end - vma->vm_start; vfio_platform_mmap_mmio()
483 pgoff = vma->vm_pgoff & vfio_platform_mmap_mmio()
490 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vfio_platform_mmap_mmio()
491 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; vfio_platform_mmap_mmio()
493 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vfio_platform_mmap_mmio()
494 req_len, vma->vm_page_prot); vfio_platform_mmap_mmio()
497 static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma) vfio_platform_mmap() argument
502 index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT); vfio_platform_mmap()
504 if (vma->vm_end < vma->vm_start) vfio_platform_mmap()
506 if (!(vma->vm_flags & VM_SHARED)) vfio_platform_mmap()
510 if (vma->vm_start & ~PAGE_MASK) vfio_platform_mmap()
512 if (vma->vm_end & ~PAGE_MASK) vfio_platform_mmap()
519 && (vma->vm_flags & VM_READ)) vfio_platform_mmap()
523 && (vma->vm_flags & VM_WRITE)) vfio_platform_mmap()
526 vma->vm_private_data = vdev; vfio_platform_mmap()
529 return vfio_platform_mmap_mmio(vdev->regions[index], vma); vfio_platform_mmap()
477 vfio_platform_mmap_mmio(struct vfio_platform_region region, struct vm_area_struct *vma) vfio_platform_mmap_mmio() argument
