Searched refs:vma (Results 1 - 200 of 864) sorted by relevance

/linux-4.1.27/arch/sparc/include/asm/
tlb_32.h
4 #define tlb_start_vma(tlb, vma) \
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
9 #define tlb_end_vma(tlb, vma) \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
tlbflush_32.h
10 #define flush_tlb_range(vma, start, end) \
11 sparc32_cachetlb_ops->tlb_range(vma, start, end)
12 #define flush_tlb_page(vma, addr) \
13 sparc32_cachetlb_ops->tlb_page(vma, addr)
cacheflush_32.h
12 #define flush_cache_range(vma,start,end) \
13 sparc32_cachetlb_ops->cache_range(vma, start, end)
14 #define flush_cache_page(vma,addr,pfn) \
15 sparc32_cachetlb_ops->cache_page(vma, addr)
17 #define flush_icache_page(vma, pg) do { } while (0)
19 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
23 flush_cache_page(vma, vaddr, page_to_pfn(page));\
26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
cacheflush_64.h
23 #define flush_cache_range(vma, start, end) \
24 flush_cache_mm((vma)->vm_mm)
25 #define flush_cache_page(vma, page, pfn) \
26 flush_cache_mm((vma)->vm_mm)
50 #define flush_icache_page(vma, pg) do { } while(0)
51 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
66 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
68 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
fb.h
9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
13 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
tlb_64.h
25 #define tlb_start_vma(tlb, vma) do { } while (0)
26 #define tlb_end_vma(tlb, vma) do { } while (0)
hugetlb.h
46 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
68 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
74 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
75 flush_tlb_page(vma, addr); huge_ptep_set_access_flags()
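
The sparc32 headers above show the per-VMA TLB teardown convention: tlb_start_vma() flushes the caches over [vm_start, vm_end) before the page tables are unmapped and tlb_end_vma() flushes the TLB afterwards, while sparc64 (tlb_64.h) stubs both out. Below is a toy, compilable C model of that convention; the struct layouts and printf-based flush stubs are simplifications for illustration, not kernel code.

#include <stdio.h>

/* Toy stand-ins for the kernel types; only the fields the macros touch. */
struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
};

struct mmu_gather {
	int unused;	/* the real struct carries unmapping state */
};

/* Stub flush primitives standing in for the per-architecture hooks. */
static void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	(void)vma;
	printf("flush caches %#lx-%#lx\n", start, end);
}

static void flush_tlb_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	(void)vma;
	printf("flush TLB    %#lx-%#lx\n", start, end);
}

/* The sparc32 convention: caches before the unmap, TLB entries after it. */
#define tlb_start_vma(tlb, vma) \
	do { (void)(tlb); flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); } while (0)
#define tlb_end_vma(tlb, vma) \
	do { (void)(tlb); flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); } while (0)

int main(void)
{
	struct mmu_gather tlb = { 0 };
	struct vm_area_struct vma = { .vm_start = 0x400000, .vm_end = 0x600000 };

	tlb_start_vma(&tlb, &vma);
	/* ...page tables for the range would be torn down here... */
	tlb_end_vma(&tlb, &vma);
	return 0;
}
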
/linux-4.1.27/arch/ia64/include/asm/
fb.h
9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
12 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) fb_pgprotect()
13 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
15 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
cacheflush.h
22 #define flush_cache_range(vma, start, end) do { } while (0)
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
24 #define flush_icache_page(vma,page) do { } while (0)
41 #define flush_icache_user_range(vma, page, user_addr, len) \
47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
49 flush_icache_user_range(vma, page, vaddr, len); \
51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
tlbflush.h
69 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
75 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
78 flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); flush_tlb_page()
80 if (vma->vm_mm == current->active_mm) flush_tlb_page()
83 vma->vm_mm->context = 0; flush_tlb_page()
/linux-4.1.27/arch/metag/include/asm/
tlb.h
11 #define tlb_start_vma(tlb, vma) \
14 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
17 #define tlb_end_vma(tlb, vma) \
20 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
26 #define tlb_start_vma(tlb, vma) do { } while (0)
27 #define tlb_end_vma(tlb, vma) do { } while (0)
tlbflush.h
15 * - flush_tlb_page(vma, vmaddr) flushes one page
52 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
55 flush_tlb_mm(vma->vm_mm); flush_tlb_page()
58 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
61 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
/linux-4.1.27/arch/x86/um/
mem_64.c
5 const char *arch_vma_name(struct vm_area_struct *vma) arch_vma_name() argument
7 if (vma->vm_mm && vma->vm_start == um_vdso_addr) arch_vma_name()
mem_32.c
48 struct vm_area_struct *vma = get_gate_vma(mm); in_gate_area() local
50 if (!vma) in_gate_area()
53 return (addr >= vma->vm_start) && (addr < vma->vm_end); in_gate_area()
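
arch_vma_name() above is the hook that lets an architecture label special mappings such as the UML vDSO in /proc/<pid>/maps, and in_gate_area() answers whether an address falls inside the gate VMA. From userspace, the base of the vDSO VMA can be read from the auxiliary vector; a minimal sketch (Linux/glibc, getauxval() available since glibc 2.16):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base address the kernel chose for this process's [vdso] mapping. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (!vdso) {
		puts("no vDSO reported in the auxiliary vector");
		return 1;
	}
	printf("[vdso] VMA starts at %#lx (compare /proc/self/maps)\n", vdso);
	return 0;
}
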
/linux-4.1.27/arch/parisc/include/asm/
tlb.h
9 #define tlb_start_vma(tlb, vma) \
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
14 #define tlb_end_vma(tlb, vma) \
16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; fb_pgprotect()
cacheflush.h
82 #define flush_icache_page(vma,page) do { \
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
105 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
106 void flush_cache_range(struct vm_area_struct *vma,
114 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
117 flush_tlb_page(vma, vmaddr); flush_anon_page()
/linux-4.1.27/arch/powerpc/include/asm/
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, fb_pgprotect()
12 vma->vm_end - vma->vm_start, fb_pgprotect()
13 vma->vm_page_prot); fb_pgprotect()
tlbflush.h
8 * - flush_tlb_page(vma, vmaddr) flushes one page
11 * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
12 * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
13 * - flush_tlb_range(vma, start, end) flushes a range of pages
37 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
42 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
49 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
54 #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
57 #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
65 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
66 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
67 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
70 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
73 flush_tlb_page(vma, vmaddr); local_flush_tlb_page()
142 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
147 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
152 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, flush_tlb_page_nohash() argument
157 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
cacheflush.h
22 #define flush_cache_range(vma, start, end) do { } while (0)
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
24 #define flush_icache_page(vma, page) do { } while (0)
36 extern void flush_icache_user_range(struct vm_area_struct *vma,
55 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
58 flush_icache_user_range(vma, page, vaddr, len); \
60 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
hugetlb.h
86 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
88 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
136 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
140 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); huge_ptep_clear_flush()
141 flush_tlb_page(vma, addr); huge_ptep_clear_flush()
154 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
164 ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
167 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
190 static inline void flush_hugetlb_page(struct vm_area_struct *vma, flush_hugetlb_page() argument
tlb.h
28 #define tlb_start_vma(tlb, vma) do { } while (0)
29 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.1.27/include/linux/
hugetlb_inline.h
8 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) is_vm_hugetlb_page() argument
10 return !!(vma->vm_flags & VM_HUGETLB); is_vm_hugetlb_page()
15 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) is_vm_hugetlb_page() argument
huge_mm.h
5 struct vm_area_struct *vma,
10 struct vm_area_struct *vma);
12 struct vm_area_struct *vma,
15 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
23 struct vm_area_struct *vma,
25 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
28 extern int move_huge_pmd(struct vm_area_struct *vma,
33 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
68 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
101 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
123 extern int hugepage_madvise(struct vm_area_struct *vma,
125 extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
129 extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
132 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
135 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); pmd_trans_huge_lock()
137 return __pmd_trans_huge_lock(pmd, vma, ptl); pmd_trans_huge_lock()
141 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, vma_adjust_trans_huge() argument
146 if (!vma->anon_vma || vma->vm_ops) vma_adjust_trans_huge()
148 __vma_adjust_trans_huge(vma, start, end, adjust_next); vma_adjust_trans_huge()
157 extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
192 static inline int hugepage_madvise(struct vm_area_struct *vma, hugepage_madvise() argument
198 static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, vma_adjust_trans_huge() argument
204 static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, pmd_trans_huge_lock() argument
210 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_numa_page() argument
khugepaged.h
9 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
39 static inline int khugepaged_enter(struct vm_area_struct *vma, khugepaged_enter() argument
42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) khugepaged_enter()
46 if (__khugepaged_enter(vma->vm_mm)) khugepaged_enter()
58 static inline int khugepaged_enter(struct vm_area_struct *vma, khugepaged_enter() argument
63 static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, khugepaged_enter_vma_merge() argument
rmap.h
20 * directly to a vma: instead it points to an anon_vma, on whose list
23 * After unlinking the last vma on the list, we must garbage collect
25 * pointing to this anon_vma once its vma list is empty.
32 * guarantee that the vma of page tables will exist for
74 struct vm_area_struct *vma; member in struct:anon_vma_chain
138 static inline void anon_vma_merge(struct vm_area_struct *vma, anon_vma_merge() argument
141 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); anon_vma_merge()
196 * Used by swapoff to help locate where page is expected in vma.
219 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
225 * rmap_one: executed on each vma where page is mapped
228 * invalid_vma: for skipping uninterested vma
232 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
236 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
244 #define anon_vma_prepare(vma) (0)
245 #define anon_vma_link(vma) do {} while (0)
mempolicy.h
31 * its own state. All vma manipulation is somewhat protected by a down_read on
93 #define vma_policy(vma) ((vma)->vm_policy)
131 struct vm_area_struct *vma,
138 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
140 bool vma_policy_mof(struct vm_area_struct *vma);
148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
174 /* Check if a vma is migratable */ vma_migratable()
175 static inline int vma_migratable(struct vm_area_struct *vma) vma_migratable() argument
177 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) vma_migratable()
181 if (vma->vm_flags & VM_HUGETLB) vma_migratable()
190 if (vma->vm_file && vma_migratable()
191 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) vma_migratable()
231 #define vma_policy(vma) NULL
257 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, huge_zonelist() argument
288 static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, mpol_misplaced() argument
mmdebug.h
13 void dump_vma(const struct vm_area_struct *vma);
25 #define VM_BUG_ON_VMA(cond, vma) \
28 dump_vma(vma); \
45 #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
ksm.h
20 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
52 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
58 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
59 * but what if the vma was unmerged while the page was swapped out?
62 struct vm_area_struct *vma, unsigned long address);
79 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, ksm_madvise() argument
86 struct vm_area_struct *vma, unsigned long address) ksm_might_need_to_copy()
85 ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) ksm_might_need_to_copy() argument
highmem.h
14 static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) flush_anon_page() argument
144 * @vma: The VMA the page is to be allocated for
157 struct vm_area_struct *vma,
161 vma, vaddr);
172 * @vma: The VMA the page is to be allocated for
179 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
182 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
225 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
224 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
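
Several of the include/linux hits above (ksm_madvise(), hugepage_madvise(), khugepaged_enter()) are the per-VMA opt-in points for KSM and transparent hugepages, reached from madvise(2) as the mm/madvise.c results further down show. A minimal userspace illustration, assuming a kernel built with CONFIG_KSM and CONFIG_TRANSPARENT_HUGEPAGE (the calls simply fail with EINVAL otherwise):

#define _DEFAULT_SOURCE		/* MADV_MERGEABLE / MADV_HUGEPAGE from <sys/mman.h> */
#include <stdio.h>
#include <sys/mman.h>

#define LEN (16UL * 1024 * 1024)

int main(void)
{
	/* Anonymous, page-aligned region: the unit the madvise() flags apply to. */
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Sets VM_MERGEABLE via ksm_madvise(): KSM may deduplicate these pages. */
	if (madvise(p, LEN, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");	/* needs CONFIG_KSM */

	/* Sets VM_HUGEPAGE via hugepage_madvise(): khugepaged may back this with THPs. */
	if (madvise(p, LEN, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");	/* needs CONFIG_TRANSPARENT_HUGEPAGE */

	munmap(p, LEN);
	return 0;
}
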
/linux-4.1.27/arch/avr32/include/asm/
tlb.h
11 #define tlb_start_vma(tlb, vma) \
12 flush_cache_range(vma, vma->vm_start, vma->vm_end)
14 #define tlb_end_vma(tlb, vma) \
15 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
fb.h
8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot) fb_pgprotect()
tlbflush.h
19 * - flush_tlb_page(vma, vmaddr) flushes one page
20 * - flush_tlb_range(vma, start, end) flushes a range of pages
26 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
cacheflush.h
91 #define flush_cache_range(vma, start, end) do { } while (0)
92 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
101 * #define flush_icache_page(vma, page) do { } while (0)
103 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
121 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
125 static inline void copy_from_user_page(struct vm_area_struct *vma, copy_from_user_page() argument
/linux-4.1.27/drivers/media/v4l2-core/
videobuf2-memops.c
27 * @vma: given virtual memory area
36 struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma) vb2_get_vma() argument
44 if (vma->vm_ops && vma->vm_ops->open) vb2_get_vma()
45 vma->vm_ops->open(vma); vb2_get_vma()
47 if (vma->vm_file) vb2_get_vma()
48 get_file(vma->vm_file); vb2_get_vma()
50 memcpy(vma_copy, vma, sizeof(*vma)); vb2_get_vma()
62 * @vma: virtual memory region associated with the area to be released
67 void vb2_put_vma(struct vm_area_struct *vma) vb2_put_vma() argument
69 if (!vma) vb2_put_vma()
72 if (vma->vm_ops && vma->vm_ops->close) vb2_put_vma()
73 vma->vm_ops->close(vma); vb2_put_vma()
75 if (vma->vm_file) vb2_put_vma()
76 fput(vma->vm_file); vb2_put_vma()
78 kfree(vma); vb2_put_vma()
100 struct vm_area_struct *vma; vb2_get_contig_userptr() local
109 vma = find_vma(mm, start); vb2_get_contig_userptr()
111 if (vma == NULL || vma->vm_end < end) vb2_get_contig_userptr()
115 int ret = follow_pfn(vma, start, &this_pfn); vb2_get_contig_userptr()
128 * Memory is contigous, lock vma and return to the caller vb2_get_contig_userptr()
130 *res_vma = vb2_get_vma(vma); vb2_get_contig_userptr()
140 * vb2_common_vm_open() - increase refcount of the vma
141 * @vma: virtual memory region for the mapping
143 * This function adds another user to the provided vma. It expects
144 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
146 static void vb2_common_vm_open(struct vm_area_struct *vma) vb2_common_vm_open() argument
148 struct vb2_vmarea_handler *h = vma->vm_private_data; vb2_common_vm_open()
150 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", vb2_common_vm_open()
151 __func__, h, atomic_read(h->refcount), vma->vm_start, vb2_common_vm_open()
152 vma->vm_end); vb2_common_vm_open()
158 * vb2_common_vm_close() - decrease refcount of the vma
159 * @vma: virtual memory region for the mapping
161 * This function releases the user from the provided vma. It expects
162 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
164 static void vb2_common_vm_close(struct vm_area_struct *vma) vb2_common_vm_close() argument
166 struct vb2_vmarea_handler *h = vma->vm_private_data; vb2_common_vm_close()
168 pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", vb2_common_vm_close()
169 __func__, h, atomic_read(h->refcount), vma->vm_start, vb2_common_vm_close()
170 vma->vm_end); vb2_common_vm_close()
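
videobuf2-memops.c above keeps a userptr mapping alive by copying the vm_area_struct, taking a reference on vma->vm_file and forwarding to the vm_ops open/close hooks, while vb2_common_vm_open()/vb2_common_vm_close() adjust a refcount kept behind vma->vm_private_data. The toy, compilable model below mirrors only that open/close refcount pattern; the struct and function names are illustrative, not kernel API.

#include <stdio.h>

/* Toy stand-ins: a mapping handler refcounted by open/close callbacks. */
struct vb2_vmarea_handler {
	int refcount;
};

struct vm_area_struct {
	unsigned long vm_start, vm_end;
	void *vm_private_data;		/* points at the handler */
};

static void vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;
	h->refcount++;
	printf("open : refcount=%d vma %#lx-%#lx\n",
	       h->refcount, vma->vm_start, vma->vm_end);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;
	h->refcount--;
	printf("close: refcount=%d vma %#lx-%#lx\n",
	       h->refcount, vma->vm_start, vma->vm_end);
}

int main(void)
{
	struct vb2_vmarea_handler h = { .refcount = 0 };
	struct vm_area_struct vma = {
		.vm_start = 0x7f0000000000UL, .vm_end = 0x7f0000100000UL,
		.vm_private_data = &h,
	};

	vm_open(&vma);	/* initial mmap() */
	vm_open(&vma);	/* a second user sharing the mapping */
	vm_close(&vma);
	vm_close(&vma);	/* last user gone: the buffer could be freed here */
	return 0;
}
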
videobuf-vmalloc.c
54 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
56 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
58 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, videobuf_vm_open()
59 map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
64 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
66 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
70 dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, videobuf_vm_close()
71 map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
209 The code below won't work, since mem->vma = NULL __videobuf_iolock()
212 rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); __videobuf_iolock()
234 struct vm_area_struct *vma) __videobuf_mmap_mapper()
250 buf->baddr = vma->vm_start; __videobuf_mmap_mapper()
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); __videobuf_mmap_mapper()
265 retval = remap_vmalloc_range(vma, mem->vaddr, 0); __videobuf_mmap_mapper()
272 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
273 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; __videobuf_mmap_mapper()
274 vma->vm_private_data = map; __videobuf_mmap_mapper()
277 map, q, vma->vm_start, vma->vm_end, __videobuf_mmap_mapper()
279 vma->vm_pgoff, buf->i); __videobuf_mmap_mapper()
281 videobuf_vm_open(vma); __videobuf_mmap_mapper()
232 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
videobuf-dma-contig.c
66 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
68 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", videobuf_vm_open()
71 map, map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
76 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
78 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", videobuf_vm_close()
83 map, map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
164 struct vm_area_struct *vma; videobuf_dma_contig_user_get() local
176 vma = find_vma(mm, vb->baddr); videobuf_dma_contig_user_get()
177 if (!vma) videobuf_dma_contig_user_get()
180 if ((vb->baddr + mem->size) > vma->vm_end) videobuf_dma_contig_user_get()
188 ret = follow_pfn(vma, user_address, &this_pfn); videobuf_dma_contig_user_get()
278 struct vm_area_struct *vma) __videobuf_mmap_mapper()
295 buf->baddr = vma->vm_start; __videobuf_mmap_mapper()
306 size = vma->vm_end - vma->vm_start; __videobuf_mmap_mapper()
307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); __videobuf_mmap_mapper()
315 vma->vm_pgoff = 0; __videobuf_mmap_mapper()
317 retval = vm_iomap_memory(vma, mem->dma_handle, size); __videobuf_mmap_mapper()
326 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
327 vma->vm_flags |= VM_DONTEXPAND; __videobuf_mmap_mapper()
328 vma->vm_private_data = map; __videobuf_mmap_mapper()
331 map, q, vma->vm_start, vma->vm_end, __videobuf_mmap_mapper()
332 (long int)buf->bsize, vma->vm_pgoff, buf->i); __videobuf_mmap_mapper()
334 videobuf_vm_open(vma); __videobuf_mmap_mapper()
276 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
videobuf2-dma-contig.c
42 struct vm_area_struct *vma; member in struct:vb2_dc_buf
191 static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) vb2_dc_mmap() argument
205 vma->vm_pgoff = 0; vb2_dc_mmap()
207 ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr, vb2_dc_mmap()
215 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vb2_dc_mmap()
216 vma->vm_private_data = &buf->handler; vb2_dc_mmap()
217 vma->vm_ops = &vb2_common_vm_ops; vb2_dc_mmap()
219 vma->vm_ops->open(vma); vb2_dc_mmap()
222 __func__, (unsigned long)buf->dma_addr, vma->vm_start, vb2_dc_mmap()
362 struct vm_area_struct *vma) vb2_dc_dmabuf_ops_mmap()
364 return vb2_dc_mmap(dbuf->priv, vma); vb2_dc_dmabuf_ops_mmap()
432 static inline int vma_is_io(struct vm_area_struct *vma) vma_is_io() argument
434 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); vma_is_io()
438 struct vm_area_struct *vma, unsigned long *res) vb2_dc_get_user_pfn()
444 if (!vma_is_io(vma)) vb2_dc_get_user_pfn()
447 ret = follow_pfn(vma, start, &pfn); vb2_dc_get_user_pfn()
456 ret = follow_pfn(vma, start, &pfn); vb2_dc_get_user_pfn()
471 int n_pages, struct vm_area_struct *vma, vb2_dc_get_user_pages()
474 if (vma_is_io(vma)) { vb2_dc_get_user_pages()
479 int ret = follow_pfn(vma, start, &pfn); vb2_dc_get_user_pages()
529 if (!vma_is_io(buf->vma)) vb2_dc_put_userptr()
535 vb2_put_vma(buf->vma); vb2_dc_put_userptr()
581 struct vm_area_struct *vma; vb2_dc_get_userptr() local
620 vma = find_vma(current->mm, vaddr); vb2_dc_get_userptr()
621 if (!vma) { vb2_dc_get_userptr()
622 pr_err("no vma for address %lu\n", vaddr); vb2_dc_get_userptr()
627 if (vma->vm_end < vaddr + size) { vb2_dc_get_userptr()
628 pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size); vb2_dc_get_userptr()
633 buf->vma = vb2_get_vma(vma); vb2_dc_get_userptr()
634 if (!buf->vma) { vb2_dc_get_userptr()
635 pr_err("failed to copy vma\n"); vb2_dc_get_userptr()
641 ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir); vb2_dc_get_userptr()
644 if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { vb2_dc_get_userptr()
704 if (!vma_is_io(buf->vma)) vb2_dc_get_userptr()
712 if (pages && !vma_is_io(buf->vma)) vb2_dc_get_userptr()
717 vb2_put_vma(buf->vma); vb2_dc_get_userptr()
361 vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) vb2_dc_dmabuf_ops_mmap() argument
437 vb2_dc_get_user_pfn(unsigned long start, int n_pages, struct vm_area_struct *vma, unsigned long *res) vb2_dc_get_user_pfn() argument
470 vb2_dc_get_user_pages(unsigned long start, struct page **pages, int n_pages, struct vm_area_struct *vma, enum dma_data_direction dma_dir) vb2_dc_get_user_pages() argument
videobuf2-vmalloc.c
27 struct vm_area_struct *vma; member in struct:vb2_vmalloc_buf
81 struct vm_area_struct *vma; vb2_vmalloc_get_userptr() local
93 vma = find_vma(current->mm, vaddr); vb2_vmalloc_get_userptr()
94 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { vb2_vmalloc_get_userptr()
95 if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) vb2_vmalloc_get_userptr()
97 buf->vma = vma; vb2_vmalloc_get_userptr()
157 vb2_put_vma(buf->vma); vb2_vmalloc_put_userptr()
182 static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) vb2_vmalloc_mmap() argument
192 ret = remap_vmalloc_range(vma, buf->vaddr, 0); vb2_vmalloc_mmap()
201 vma->vm_flags |= VM_DONTEXPAND; vb2_vmalloc_mmap()
206 vma->vm_private_data = &buf->handler; vb2_vmalloc_mmap()
207 vma->vm_ops = &vb2_common_vm_ops; vb2_vmalloc_mmap()
209 vma->vm_ops->open(vma); vb2_vmalloc_mmap()
350 struct vm_area_struct *vma) vb2_vmalloc_dmabuf_ops_mmap()
352 return vb2_vmalloc_mmap(dbuf->priv, vma); vb2_vmalloc_dmabuf_ops_mmap()
349 vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma) vb2_vmalloc_dmabuf_ops_mmap() argument
videobuf-dma-sg.c
384 static void videobuf_vm_open(struct vm_area_struct *vma) videobuf_vm_open() argument
386 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_open()
388 dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, videobuf_vm_open()
389 map->count, vma->vm_start, vma->vm_end); videobuf_vm_open()
394 static void videobuf_vm_close(struct vm_area_struct *vma) videobuf_vm_close() argument
396 struct videobuf_mapping *map = vma->vm_private_data; videobuf_vm_close()
401 dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, videobuf_vm_close()
402 map->count, vma->vm_start, vma->vm_end); videobuf_vm_close()
435 static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) videobuf_vm_fault() argument
439 dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", videobuf_vm_fault()
441 vma->vm_start, vma->vm_end); videobuf_vm_fault()
589 struct vm_area_struct *vma) __videobuf_mmap_mapper()
612 (vma->vm_pgoff << PAGE_SHIFT)); __videobuf_mmap_mapper()
629 q->bufs[i]->baddr = vma->vm_start + size; __videobuf_mmap_mapper()
635 vma->vm_ops = &videobuf_vm_ops; __videobuf_mmap_mapper()
636 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; __videobuf_mmap_mapper()
637 vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */ __videobuf_mmap_mapper()
638 vma->vm_private_data = map; __videobuf_mmap_mapper()
640 map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last); __videobuf_mmap_mapper()
587 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) __videobuf_mmap_mapper() argument
/linux-4.1.27/fs/ocfs2/
mmap.h
4 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma);
/linux-4.1.27/arch/xtensa/include/asm/
tlb.h
21 # define tlb_start_vma(tlb,vma) do { } while (0)
22 # define tlb_end_vma(tlb,vma) do { } while (0)
26 # define tlb_start_vma(tlb, vma) \
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
32 # define tlb_end_vma(tlb, vma) \
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
/linux-4.1.27/drivers/gpu/drm/
drm_vm.c
49 struct vm_area_struct *vma; member in struct:drm_vma_entry
53 static void drm_vm_open(struct vm_area_struct *vma);
54 static void drm_vm_close(struct vm_area_struct *vma);
57 struct vm_area_struct *vma) drm_io_prot()
59 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); drm_io_prot()
67 if (efi_range_is_wc(vma->vm_start, vma->vm_end - drm_io_prot()
68 vma->vm_start)) drm_io_prot()
78 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) drm_dma_prot() argument
80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); drm_dma_prot()
91 * \param vma virtual memory area.
99 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_fault() argument
101 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_fault()
116 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) drm_do_vm_fault()
128 vma->vm_start; drm_do_vm_fault()
172 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_fault() argument
181 * \param vma virtual memory area.
188 static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_shm_fault() argument
190 struct drm_local_map *map = vma->vm_private_data; drm_do_vm_shm_fault()
198 offset = (unsigned long)vmf->virtual_address - vma->vm_start; drm_do_vm_shm_fault()
213 * \param vma virtual memory area.
218 static void drm_vm_shm_close(struct vm_area_struct *vma) drm_vm_shm_close() argument
220 struct drm_file *priv = vma->vm_file->private_data; drm_vm_shm_close()
228 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_shm_close()
230 map = vma->vm_private_data; drm_vm_shm_close()
234 if (pt->vma->vm_private_data == map) drm_vm_shm_close()
236 if (pt->vma == vma) { drm_vm_shm_close()
284 * \param vma virtual memory area.
290 static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_dma_fault() argument
292 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_dma_fault()
304 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ drm_do_vm_dma_fault()
318 * \param vma virtual memory area.
324 static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_do_vm_sg_fault() argument
326 struct drm_local_map *map = vma->vm_private_data; drm_do_vm_sg_fault()
327 struct drm_file *priv = vma->vm_file->private_data; drm_do_vm_sg_fault()
340 offset = (unsigned long)vmf->virtual_address - vma->vm_start; drm_do_vm_sg_fault()
350 static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_fault() argument
352 return drm_do_vm_fault(vma, vmf); drm_vm_fault()
355 static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_shm_fault() argument
357 return drm_do_vm_shm_fault(vma, vmf); drm_vm_shm_fault()
360 static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_dma_fault() argument
362 return drm_do_vm_dma_fault(vma, vmf); drm_vm_dma_fault()
365 static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) drm_vm_sg_fault() argument
367 return drm_do_vm_sg_fault(vma, vmf); drm_vm_sg_fault()
401 * \param vma virtual memory area.
403 * Create a new drm_vma_entry structure as the \p vma private data entry and
407 struct vm_area_struct *vma) drm_vm_open_locked()
412 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_open_locked()
416 vma_entry->vma = vma; drm_vm_open_locked()
422 static void drm_vm_open(struct vm_area_struct *vma) drm_vm_open() argument
424 struct drm_file *priv = vma->vm_file->private_data; drm_vm_open()
428 drm_vm_open_locked(dev, vma); drm_vm_open()
433 struct vm_area_struct *vma) drm_vm_close_locked()
438 vma->vm_start, vma->vm_end - vma->vm_start); drm_vm_close_locked()
441 if (pt->vma == vma) { drm_vm_close_locked()
452 * \param vma virtual memory area.
454 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
457 static void drm_vm_close(struct vm_area_struct *vma) drm_vm_close() argument
459 struct drm_file *priv = vma->vm_file->private_data; drm_vm_close()
463 drm_vm_close_locked(dev, vma); drm_vm_close()
471 * \param vma virtual memory area.
477 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) drm_mmap_dma() argument
482 unsigned long length = vma->vm_end - vma->vm_start; drm_mmap_dma()
487 vma->vm_start, vma->vm_end, vma->vm_pgoff); drm_mmap_dma()
496 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); drm_mmap_dma()
498 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; drm_mmap_dma()
503 vma->vm_page_prot = drm_mmap_dma()
506 (__pte(pgprot_val(vma->vm_page_prot))))); drm_mmap_dma()
510 vma->vm_ops = &drm_vm_dma_ops; drm_mmap_dma()
512 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; drm_mmap_dma()
514 drm_vm_open_locked(dev, vma); drm_mmap_dma()
531 * \param vma virtual memory area.
540 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) drm_mmap_locked() argument
549 vma->vm_start, vma->vm_end, vma->vm_pgoff); drm_mmap_locked()
558 if (!vma->vm_pgoff drm_mmap_locked()
564 return drm_mmap_dma(filp, vma); drm_mmap_locked()
566 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { drm_mmap_locked()
576 if (map->size < vma->vm_end - vma->vm_start) drm_mmap_locked()
580 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); drm_mmap_locked()
582 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW; drm_mmap_locked()
587 vma->vm_page_prot = drm_mmap_locked()
590 (__pte(pgprot_val(vma->vm_page_prot))))); drm_mmap_locked()
604 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; drm_mmap_locked()
606 vma->vm_ops = &drm_vm_ops; drm_mmap_locked()
614 vma->vm_page_prot = drm_io_prot(map, vma); drm_mmap_locked()
615 if (io_remap_pfn_range(vma, vma->vm_start, drm_mmap_locked()
617 vma->vm_end - vma->vm_start, drm_mmap_locked()
618 vma->vm_page_prot)) drm_mmap_locked()
623 vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); drm_mmap_locked()
625 vma->vm_ops = &drm_vm_ops; drm_mmap_locked()
630 if (remap_pfn_range(vma, vma->vm_start, drm_mmap_locked()
632 vma->vm_end - vma->vm_start, vma->vm_page_prot)) drm_mmap_locked()
634 vma->vm_page_prot = drm_dma_prot(map->type, vma); drm_mmap_locked()
637 vma->vm_ops = &drm_vm_shm_ops; drm_mmap_locked()
638 vma->vm_private_data = (void *)map; drm_mmap_locked()
641 vma->vm_ops = &drm_vm_sg_ops; drm_mmap_locked()
642 vma->vm_private_data = (void *)map; drm_mmap_locked()
643 vma->vm_page_prot = drm_dma_prot(map->type, vma); drm_mmap_locked()
648 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; drm_mmap_locked()
650 drm_vm_open_locked(dev, vma); drm_mmap_locked()
654 int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) drm_legacy_mmap() argument
664 ret = drm_mmap_locked(filp, vma); drm_legacy_mmap()
673 struct drm_vma_entry *vma, *vma_temp; drm_legacy_vma_flush() local
675 /* Clear vma list (only needed for legacy drivers) */ drm_legacy_vma_flush()
676 list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { drm_legacy_vma_flush()
677 list_del(&vma->head); drm_legacy_vma_flush()
678 kfree(vma); drm_legacy_vma_flush()
687 struct vm_area_struct *vma; drm_vma_info() local
697 seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n", drm_vma_info()
702 vma = pt->vma; drm_vma_info()
703 if (!vma) drm_vma_info()
708 (void *)vma->vm_start, (void *)vma->vm_end, drm_vma_info()
709 vma->vm_flags & VM_READ ? 'r' : '-', drm_vma_info()
710 vma->vm_flags & VM_WRITE ? 'w' : '-', drm_vma_info()
711 vma->vm_flags & VM_EXEC ? 'x' : '-', drm_vma_info()
712 vma->vm_flags & VM_MAYSHARE ? 's' : 'p', drm_vma_info()
713 vma->vm_flags & VM_LOCKED ? 'l' : '-', drm_vma_info()
714 vma->vm_flags & VM_IO ? 'i' : '-', drm_vma_info()
715 vma->vm_pgoff); drm_vma_info()
718 pgprot = pgprot_val(vma->vm_page_prot); drm_vma_info()
56 drm_io_prot(struct drm_local_map *map, struct vm_area_struct *vma) drm_io_prot() argument
406 drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma) drm_vm_open_locked() argument
432 drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma) drm_vm_close_locked() argument
/linux-4.1.27/arch/arc/include/asm/
tlb.h
27 #define tlb_start_vma(tlb, vma)
29 #define tlb_start_vma(tlb, vma) \
32 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
36 #define tlb_end_vma(tlb, vma) \
39 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
tlbflush.h
16 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
18 void local_flush_tlb_range(struct vm_area_struct *vma,
22 #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
23 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
28 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
30 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
cacheflush.h
30 #define flush_icache_page(vma, page)
62 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
68 void flush_cache_range(struct vm_area_struct *vma,
70 void flush_cache_page(struct vm_area_struct *vma,
78 void flush_anon_page(struct vm_area_struct *vma,
110 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
113 if (vma->vm_flags & VM_EXEC) \
117 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.1.27/mm/
mremap.c
52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, alloc_new_pmd() argument
88 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, move_ptes() argument
95 struct mm_struct *mm = vma->vm_mm; move_ptes()
108 * - During exec() shift_arg_pages(), we use a specially tagged vma move_ptes()
111 * - During mremap(), new_vma is often known to be placed after vma move_ptes()
118 if (vma->vm_file) { move_ptes()
119 mapping = vma->vm_file->f_mapping; move_ptes()
122 if (vma->anon_vma) { move_ptes()
123 anon_vma = vma->anon_vma; move_ptes()
162 unsigned long move_page_tables(struct vm_area_struct *vma, move_page_tables() argument
174 flush_cache_range(vma, old_addr, old_end); move_page_tables()
178 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); move_page_tables()
187 old_pmd = get_old_pmd(vma->vm_mm, old_addr); move_page_tables()
190 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); move_page_tables()
196 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, move_page_tables()
197 vma); move_page_tables()
200 anon_vma_lock_write(vma->anon_vma); move_page_tables()
201 err = move_huge_pmd(vma, new_vma, old_addr, move_page_tables()
205 anon_vma_unlock_write(vma->anon_vma); move_page_tables()
211 split_huge_page_pmd(vma, old_addr, old_pmd); move_page_tables()
223 move_ptes(vma, old_pmd, old_addr, old_addr + extent, move_page_tables()
228 flush_tlb_range(vma, old_end-len, old_addr); move_page_tables()
230 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); move_page_tables()
235 static unsigned long move_vma(struct vm_area_struct *vma, move_vma() argument
239 struct mm_struct *mm = vma->vm_mm; move_vma()
241 unsigned long vm_flags = vma->vm_flags; move_vma()
252 * which may split one vma into three before unmapping. move_vma()
261 * pages recently unmapped. But leave vma->vm_flags as it was, move_vma()
262 * so KSM can come around to merge on vma and new_vma afterwards. move_vma()
264 err = ksm_madvise(vma, old_addr, old_addr + old_len, move_vma()
269 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); move_vma()
270 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, move_vma()
275 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, move_vma()
283 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, move_vma()
285 vma = new_vma; move_vma()
289 } else if (vma->vm_file && vma->vm_file->f_op->mremap) { move_vma()
290 err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma); move_vma()
292 move_page_tables(new_vma, new_addr, vma, old_addr, move_vma()
300 vma->vm_flags &= ~VM_ACCOUNT; move_vma()
301 excess = vma->vm_end - vma->vm_start - old_len; move_vma()
302 if (old_addr > vma->vm_start && move_vma()
303 old_addr + old_len < vma->vm_end) move_vma()
317 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); move_vma()
320 /* OOM: unable to split vma, just get accounts right */ move_vma()
326 /* Restore VM_ACCOUNT if one or two pieces of vma left */ move_vma()
328 vma->vm_flags |= VM_ACCOUNT; move_vma()
330 vma->vm_next->vm_flags |= VM_ACCOUNT; move_vma()
345 struct vm_area_struct *vma = find_vma(mm, addr); vma_to_resize() local
347 if (!vma || vma->vm_start > addr) vma_to_resize()
350 if (is_vm_hugetlb_page(vma)) vma_to_resize()
354 if (old_len > vma->vm_end - addr) vma_to_resize()
361 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) vma_to_resize()
363 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; vma_to_resize()
364 pgoff += vma->vm_pgoff; vma_to_resize()
369 if (vma->vm_flags & VM_LOCKED) { vma_to_resize()
381 if (vma->vm_flags & VM_ACCOUNT) { vma_to_resize()
388 return vma; vma_to_resize()
395 struct vm_area_struct *vma; mremap_to() local
426 vma = vma_to_resize(addr, old_len, new_len, &charged); mremap_to()
427 if (IS_ERR(vma)) { mremap_to()
428 ret = PTR_ERR(vma); mremap_to()
433 if (vma->vm_flags & VM_MAYSHARE) mremap_to()
436 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + mremap_to()
437 ((addr - vma->vm_start) >> PAGE_SHIFT), mremap_to()
442 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); mremap_to()
452 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) vma_expandable() argument
454 unsigned long end = vma->vm_end + delta; vma_expandable()
455 if (end < vma->vm_end) /* overflow */ vma_expandable()
457 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ vma_expandable()
459 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, vma_expandable()
477 struct vm_area_struct *vma; SYSCALL_DEFINE5() local
526 vma = vma_to_resize(addr, old_len, new_len, &charged); SYSCALL_DEFINE5()
527 if (IS_ERR(vma)) { SYSCALL_DEFINE5()
528 ret = PTR_ERR(vma); SYSCALL_DEFINE5()
534 if (old_len == vma->vm_end - addr) { SYSCALL_DEFINE5()
536 if (vma_expandable(vma, new_len - old_len)) { SYSCALL_DEFINE5()
539 if (vma_adjust(vma, vma->vm_start, addr + new_len, SYSCALL_DEFINE5()
540 vma->vm_pgoff, NULL)) { SYSCALL_DEFINE5()
545 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); SYSCALL_DEFINE5()
546 if (vma->vm_flags & VM_LOCKED) { SYSCALL_DEFINE5()
563 if (vma->vm_flags & VM_MAYSHARE) SYSCALL_DEFINE5()
566 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, SYSCALL_DEFINE5()
567 vma->vm_pgoff + SYSCALL_DEFINE5()
568 ((addr - vma->vm_start) >> PAGE_SHIFT), SYSCALL_DEFINE5()
575 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); SYSCALL_DEFINE5()
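
The mm/mremap.c hits above are the kernel side of mremap(2): vma_to_resize() validates and charges the VMA, and when the mapping cannot simply grow in place, move_vma()/move_page_tables() relocate the page tables into a new VMA. A small userspace program that exercises this path (MREMAP_MAYMOVE is Linux-specific, hence _GNU_SOURCE):

#define _GNU_SOURCE		/* mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 1UL << 20, new_len = 8UL << 20;
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "contents survive the move");

	/*
	 * Grow the mapping; if it cannot expand in place, the kernel takes the
	 * move_vma()/move_page_tables() path and returns a new address.
	 */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old=%p new=%p moved=%s data=\"%s\"\n",
	       (void *)p, (void *)q, q == p ? "no" : "yes", q);

	munmap(q, new_len);
	return 0;
}
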
msync.c
35 struct vm_area_struct *vma; SYSCALL_DEFINE3() local
58 vma = find_vma(mm, start); SYSCALL_DEFINE3()
65 if (!vma) SYSCALL_DEFINE3()
67 /* Here start < vma->vm_end. */ SYSCALL_DEFINE3()
68 if (start < vma->vm_start) { SYSCALL_DEFINE3()
69 start = vma->vm_start; SYSCALL_DEFINE3()
74 /* Here vma->vm_start <= start < vma->vm_end. */ SYSCALL_DEFINE3()
76 (vma->vm_flags & VM_LOCKED)) { SYSCALL_DEFINE3()
80 file = vma->vm_file; SYSCALL_DEFINE3()
81 fstart = (start - vma->vm_start) + SYSCALL_DEFINE3()
82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT); SYSCALL_DEFINE3()
83 fend = fstart + (min(end, vma->vm_end) - start) - 1; SYSCALL_DEFINE3()
84 start = vma->vm_end; SYSCALL_DEFINE3()
86 (vma->vm_flags & VM_SHARED)) { SYSCALL_DEFINE3()
94 vma = find_vma(mm, start); SYSCALL_DEFINE3()
100 vma = vma->vm_next; SYSCALL_DEFINE3()
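
mm/msync.c above implements msync(2): it walks the VMAs covering the requested range and, for shared file mappings, writes the corresponding file range back. A minimal userspace counterpart using a temporary file as backing store:

#define _DEFAULT_SOURCE		/* mkstemp(), MAP_* flags */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	char path[] = "/tmp/msync-demo-XXXXXX";
	int fd = mkstemp(path);
	size_t len = 4096;

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("mkstemp/ftruncate");
		return 1;
	}

	/* MAP_SHARED: stores go to the page cache and reach the file. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "flushed by msync");

	/* Synchronous writeback of the dirty range covered by this VMA. */
	if (msync(p, len, MS_SYNC))
		perror("msync");

	munmap(p, len);
	close(fd);
	unlink(path);
	return 0;
}
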
mprotect.c
39 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, lock_pte_protection() argument
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
49 pmdl = pmd_lock(vma->vm_mm, pmd); lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); lock_pte_protection()
60 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, change_pte_range() argument
64 struct mm_struct *mm = vma->vm_mm; change_pte_range()
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); change_pte_range()
87 page = vm_normal_page(vma, addr, oldpte); change_pte_range()
104 !(vma->vm_flags & VM_SOFTDIRTY))) { change_pte_range()
134 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, change_pmd_range() argument
139 struct mm_struct *mm = vma->vm_mm; change_pmd_range()
161 split_huge_page_pmd(vma, addr, pmd); change_pmd_range()
163 int nr_ptes = change_huge_pmd(vma, pmd, addr, change_pmd_range()
178 this_pages = change_pte_range(vma, pmd, addr, next, newprot, change_pmd_range()
191 static inline unsigned long change_pud_range(struct vm_area_struct *vma, change_pud_range() argument
204 pages += change_pmd_range(vma, pud, addr, next, newprot, change_pud_range()
211 static unsigned long change_protection_range(struct vm_area_struct *vma, change_protection_range() argument
215 struct mm_struct *mm = vma->vm_mm; change_protection_range()
223 flush_cache_range(vma, addr, end); change_protection_range()
229 pages += change_pud_range(vma, pgd, addr, next, newprot, change_protection_range()
235 flush_tlb_range(vma, start, end); change_protection_range()
241 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, change_protection() argument
247 if (is_vm_hugetlb_page(vma)) change_protection()
248 pages = hugetlb_change_protection(vma, start, end, newprot); change_protection()
250 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); change_protection()
256 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, mprotect_fixup() argument
259 struct mm_struct *mm = vma->vm_mm; mprotect_fixup()
260 unsigned long oldflags = vma->vm_flags; mprotect_fixup()
268 *pprev = vma; mprotect_fixup()
289 * First try to merge with previous and/or next vma. mprotect_fixup()
291 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); mprotect_fixup()
293 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); mprotect_fixup()
295 vma = *pprev; mprotect_fixup()
299 *pprev = vma; mprotect_fixup()
301 if (start != vma->vm_start) { mprotect_fixup()
302 error = split_vma(mm, vma, start, 1); mprotect_fixup()
307 if (end != vma->vm_end) { mprotect_fixup()
308 error = split_vma(mm, vma, end, 0); mprotect_fixup()
318 vma->vm_flags = newflags; mprotect_fixup()
319 dirty_accountable = vma_wants_writenotify(vma); mprotect_fixup()
320 vma_set_page_prot(vma); mprotect_fixup()
322 change_protection(vma, start, end, vma->vm_page_prot, mprotect_fixup()
325 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); mprotect_fixup()
326 vm_stat_account(mm, newflags, vma->vm_file, nrpages); mprotect_fixup()
327 perf_event_mmap(vma); mprotect_fixup()
339 struct vm_area_struct *vma, *prev; SYSCALL_DEFINE3() local
368 vma = find_vma(current->mm, start); SYSCALL_DEFINE3()
370 if (!vma) SYSCALL_DEFINE3()
372 prev = vma->vm_prev; SYSCALL_DEFINE3()
374 if (vma->vm_start >= end) SYSCALL_DEFINE3()
376 start = vma->vm_start; SYSCALL_DEFINE3()
378 if (!(vma->vm_flags & VM_GROWSDOWN)) SYSCALL_DEFINE3()
381 if (vma->vm_start > start) SYSCALL_DEFINE3()
384 end = vma->vm_end; SYSCALL_DEFINE3()
386 if (!(vma->vm_flags & VM_GROWSUP)) SYSCALL_DEFINE3()
390 if (start > vma->vm_start) SYSCALL_DEFINE3()
391 prev = vma; SYSCALL_DEFINE3()
396 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ SYSCALL_DEFINE3()
399 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC)); SYSCALL_DEFINE3()
407 error = security_file_mprotect(vma, reqprot, prot); SYSCALL_DEFINE3()
411 tmp = vma->vm_end; SYSCALL_DEFINE3()
414 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); SYSCALL_DEFINE3()
424 vma = prev->vm_next; SYSCALL_DEFINE3()
425 if (!vma || vma->vm_start != nstart) { SYSCALL_DEFINE3()
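
mm/mprotect.c above is the mprotect(2) implementation: mprotect_fixup() merges or splits VMAs so the affected range has its own VMA, rewrites vm_flags and vm_page_prot, then change_protection() updates the page tables. The short program below shows the user-visible side effect of split_vma(): protecting the middle page of a three-page anonymous mapping leaves three adjacent VMAs in /proc/self/maps.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Protect only the middle page: mprotect_fixup() has to split the
	 * single VMA into three (RW | read-only | RW).
	 */
	if (mprotect(p + pg, pg, PROT_READ)) {
		perror("mprotect");
		return 1;
	}
	printf("region %p-%p: middle page now read-only\n",
	       (void *)p, (void *)(p + 3 * pg));

	munmap(p, 3 * pg);
	return 0;
}
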
pagewalk.c
38 if (pmd_none(*pmd) || !walk->vma) { walk_pmd_range()
135 struct vm_area_struct *vma = walk->vma; walk_hugetlb_range() local
136 struct hstate *h = hstate_vma(vma); walk_hugetlb_range()
164 * Decide whether we really walk over the current vma on [@start, @end)
166 * current vma, and return 1 if we skip the vma. Negative values means
172 struct vm_area_struct *vma = walk->vma; walk_page_test() local
178 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP walk_page_test()
183 * vma(VM_PFNMAP). walk_page_test()
185 if (vma->vm_flags & VM_PFNMAP) { walk_page_test()
198 struct vm_area_struct *vma = walk->vma; __walk_page_range() local
200 if (vma && is_vm_hugetlb_page(vma)) { __walk_page_range()
226 * they really want to walk over the current vma, typically by checking
230 * struct mm_walk keeps current values of some common data like vma and pmd,
236 * @walk->mm->mmap_sem, because these function traverse vma list and/or
237 * access to vma's data.
244 struct vm_area_struct *vma; walk_page_range() local
254 vma = find_vma(walk->mm, start); walk_page_range()
256 if (!vma) { /* after the last vma */ walk_page_range()
257 walk->vma = NULL; walk_page_range()
259 } else if (start < vma->vm_start) { /* outside vma */ walk_page_range()
260 walk->vma = NULL; walk_page_range()
261 next = min(end, vma->vm_start); walk_page_range()
262 } else { /* inside vma */ walk_page_range()
263 walk->vma = vma; walk_page_range()
264 next = min(end, vma->vm_end); walk_page_range()
265 vma = vma->vm_next; walk_page_range()
280 if (walk->vma || walk->pte_hole) walk_page_range()
288 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk) walk_page_vma() argument
296 VM_BUG_ON(!vma); walk_page_vma()
297 walk->vma = vma; walk_page_vma()
298 err = walk_page_test(vma->vm_start, vma->vm_end, walk); walk_page_vma()
303 return __walk_page_range(vma->vm_start, vma->vm_end, walk); walk_page_vma()
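
walk_page_range() above iterates over the VMAs and page tables covering [start, end), handing each level to caller-supplied callbacks; it is purely kernel-internal. A rough userspace analogue for the per-VMA part is to parse /proc/self/maps, as sketched below (the page-table levels the kernel walk visits have no direct /proc equivalent):

#include <stdio.h>

/* Walk the current process's VMAs: the userspace view of the VMA list. */
int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512], perms[8];
	unsigned long start, end;
	int nr = 0;

	if (!f) {
		perror("fopen /proc/self/maps");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%lx-%lx %7s", &start, &end, perms) == 3)
			printf("vma %3d: %#lx-%#lx %s (%lu KiB)\n",
			       ++nr, start, end, perms, (end - start) / 1024);
	}
	fclose(f);
	return 0;
}
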
madvise.c
24 * Any behaviour which results in changes to the vma->vm_flags needs to
45 static long madvise_behavior(struct vm_area_struct *vma, madvise_behavior() argument
49 struct mm_struct *mm = vma->vm_mm; madvise_behavior()
52 unsigned long new_flags = vma->vm_flags; madvise_behavior()
68 if (vma->vm_flags & VM_IO) { madvise_behavior()
86 error = ksm_madvise(vma, start, end, behavior, &new_flags); madvise_behavior()
92 error = hugepage_madvise(vma, &new_flags, behavior); madvise_behavior()
98 if (new_flags == vma->vm_flags) { madvise_behavior()
99 *prev = vma; madvise_behavior()
103 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); madvise_behavior()
104 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, madvise_behavior()
105 vma->vm_file, pgoff, vma_policy(vma)); madvise_behavior()
107 vma = *prev; madvise_behavior()
111 *prev = vma; madvise_behavior()
113 if (start != vma->vm_start) { madvise_behavior()
114 error = split_vma(mm, vma, start, 1); madvise_behavior()
119 if (end != vma->vm_end) { madvise_behavior()
120 error = split_vma(mm, vma, end, 0); madvise_behavior()
129 vma->vm_flags = new_flags; madvise_behavior()
142 struct vm_area_struct *vma = walk->private; swapin_walk_pmd_entry() local
154 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); swapin_walk_pmd_entry()
165 vma, index); swapin_walk_pmd_entry()
173 static void force_swapin_readahead(struct vm_area_struct *vma, force_swapin_readahead() argument
177 .mm = vma->vm_mm, force_swapin_readahead()
179 .private = vma, force_swapin_readahead()
187 static void force_shm_swapin_readahead(struct vm_area_struct *vma, force_shm_swapin_readahead() argument
196 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; force_shm_swapin_readahead()
218 static long madvise_willneed(struct vm_area_struct *vma, madvise_willneed() argument
222 struct file *file = vma->vm_file; madvise_willneed()
226 *prev = vma; madvise_willneed()
227 force_swapin_readahead(vma, start, end); madvise_willneed()
232 *prev = vma; madvise_willneed()
233 force_shm_swapin_readahead(vma, start, end, madvise_willneed()
247 *prev = vma; madvise_willneed()
248 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; madvise_willneed()
249 if (end > vma->vm_end) madvise_willneed()
250 end = vma->vm_end; madvise_willneed()
251 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; madvise_willneed()
276 static long madvise_dontneed(struct vm_area_struct *vma, madvise_dontneed() argument
280 *prev = vma; madvise_dontneed()
281 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) madvise_dontneed()
284 zap_page_range(vma, start, end - start, NULL); madvise_dontneed()
292 static long madvise_remove(struct vm_area_struct *vma, madvise_remove() argument
302 if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB)) madvise_remove()
305 f = vma->vm_file; madvise_remove()
311 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) madvise_remove()
314 offset = (loff_t)(start - vma->vm_start) madvise_remove()
315 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); madvise_remove()
319 * explicitly grab a reference because the vma (and hence the madvise_remove()
320 * vma's reference to the file) can go away as soon as we drop madvise_remove()
372 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, madvise_vma() argument
377 return madvise_remove(vma, prev, start, end); madvise_vma()
379 return madvise_willneed(vma, prev, start, end); madvise_vma()
381 return madvise_dontneed(vma, prev, start, end); madvise_vma()
383 return madvise_behavior(vma, prev, start, end, behavior); madvise_vma()
461 struct vm_area_struct *vma, *prev; SYSCALL_DEFINE3() local
502 vma = find_vma_prev(current->mm, start, &prev); SYSCALL_DEFINE3()
503 if (vma && start > vma->vm_start) SYSCALL_DEFINE3()
504 prev = vma; SYSCALL_DEFINE3()
510 if (!vma) SYSCALL_DEFINE3()
513 /* Here start < (end|vma->vm_end). */ SYSCALL_DEFINE3()
514 if (start < vma->vm_start) { SYSCALL_DEFINE3()
516 start = vma->vm_start; SYSCALL_DEFINE3()
521 /* Here vma->vm_start <= start < (end|vma->vm_end) */ SYSCALL_DEFINE3()
522 tmp = vma->vm_end; SYSCALL_DEFINE3()
526 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ SYSCALL_DEFINE3()
527 error = madvise_vma(vma, &prev, start, tmp, behavior); SYSCALL_DEFINE3()
537 vma = prev->vm_next; SYSCALL_DEFINE3()
539 vma = find_vma(current->mm, start); SYSCALL_DEFINE3()
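
mm/madvise.c above dispatches the advice values: madvise_willneed() starts readahead, madvise_dontneed() zaps the range with zap_page_range(), and madvise_behavior() rewrites vma->vm_flags, merging or splitting VMAs as needed. The small program below exercises the DONTNEED path, whose contract is that the VMA stays mapped but anonymous contents are refaulted as zero pages:

#define _DEFAULT_SOURCE		/* MADV_* constants and MAP_ANONYMOUS */
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xAB, len);

	/* Readahead hint; for resident anonymous pages this is effectively a no-op. */
	if (madvise(p, len, MADV_WILLNEED))
		perror("madvise(MADV_WILLNEED)");

	/* Drop the pages: the VMA stays, its anonymous contents do not. */
	if (madvise(p, len, MADV_DONTNEED)) {
		perror("madvise(MADV_DONTNEED)");
		return 1;
	}
	assert(p[0] == 0 && p[len - 1] == 0);	/* refaulted as zero-fill pages */
	puts("MADV_DONTNEED discarded the anonymous pages as expected");

	munmap(p, len);
	return 0;
}
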
mmap.c
61 struct vm_area_struct *vma, struct vm_area_struct *prev,
97 /* Update vma->vm_page_prot to reflect vma->vm_flags. */ vma_set_page_prot()
98 void vma_set_page_prot(struct vm_area_struct *vma) vma_set_page_prot() argument
100 unsigned long vm_flags = vma->vm_flags; vma_set_page_prot()
102 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); vma_set_page_prot()
103 if (vma_wants_writenotify(vma)) { vma_set_page_prot()
105 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vma_set_page_prot()
237 static void __remove_shared_vm_struct(struct vm_area_struct *vma, __remove_shared_vm_struct() argument
240 if (vma->vm_flags & VM_DENYWRITE) __remove_shared_vm_struct()
242 if (vma->vm_flags & VM_SHARED) __remove_shared_vm_struct()
246 vma_interval_tree_remove(vma, &mapping->i_mmap); __remove_shared_vm_struct()
252 * vma from rmap and vmtruncate before freeing its page tables.
254 void unlink_file_vma(struct vm_area_struct *vma) unlink_file_vma() argument
256 struct file *file = vma->vm_file; unlink_file_vma()
261 __remove_shared_vm_struct(vma, file, mapping); unlink_file_vma()
269 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) remove_vma() argument
271 struct vm_area_struct *next = vma->vm_next; remove_vma()
274 if (vma->vm_ops && vma->vm_ops->close) remove_vma()
275 vma->vm_ops->close(vma); remove_vma()
276 if (vma->vm_file) remove_vma()
277 fput(vma->vm_file); remove_vma()
278 mpol_put(vma_policy(vma)); remove_vma()
279 kmem_cache_free(vm_area_cachep, vma); remove_vma()
355 static long vma_compute_subtree_gap(struct vm_area_struct *vma) vma_compute_subtree_gap() argument
358 max = vma->vm_start; vma_compute_subtree_gap()
359 if (vma->vm_prev) vma_compute_subtree_gap()
360 max -= vma->vm_prev->vm_end; vma_compute_subtree_gap()
361 if (vma->vm_rb.rb_left) { vma_compute_subtree_gap()
362 subtree_gap = rb_entry(vma->vm_rb.rb_left, vma_compute_subtree_gap()
367 if (vma->vm_rb.rb_right) { vma_compute_subtree_gap()
368 subtree_gap = rb_entry(vma->vm_rb.rb_right, vma_compute_subtree_gap()
384 struct vm_area_struct *vma; browse_rb() local
385 vma = rb_entry(nd, struct vm_area_struct, vm_rb); browse_rb()
386 if (vma->vm_start < prev) { browse_rb()
388 vma->vm_start, prev); browse_rb()
391 if (vma->vm_start < pend) { browse_rb()
393 vma->vm_start, pend); browse_rb()
396 if (vma->vm_start > vma->vm_end) { browse_rb()
398 vma->vm_start, vma->vm_end); browse_rb()
401 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { browse_rb()
403 vma->rb_subtree_gap, browse_rb()
404 vma_compute_subtree_gap(vma)); browse_rb()
409 prev = vma->vm_start; browse_rb()
410 pend = vma->vm_end; browse_rb()
427 struct vm_area_struct *vma; validate_mm_rb() local
428 vma = rb_entry(nd, struct vm_area_struct, vm_rb); validate_mm_rb()
429 VM_BUG_ON_VMA(vma != ignore && validate_mm_rb()
430 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), validate_mm_rb()
431 vma); validate_mm_rb()
440 struct vm_area_struct *vma = mm->mmap; validate_mm() local
442 while (vma) { validate_mm()
443 struct anon_vma *anon_vma = vma->anon_vma; validate_mm()
448 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) validate_mm()
453 highest_address = vma->vm_end; validate_mm()
454 vma = vma->vm_next; validate_mm()
483 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
484 * vma->vm_prev->vm_end values changed, without modifying the vma's position
487 static void vma_gap_update(struct vm_area_struct *vma) vma_gap_update() argument
493 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); vma_gap_update()
496 static inline void vma_rb_insert(struct vm_area_struct *vma, vma_rb_insert() argument
502 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); vma_rb_insert()
505 static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) vma_rb_erase() argument
509 * with the possible exception of the vma being erased. vma_rb_erase()
511 validate_mm_rb(root, vma); vma_rb_erase()
518 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); vma_rb_erase()
522 * vma has some anon_vma assigned, and is already inserted on that
525 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
526 * vma must be removed from the anon_vma's interval trees using
529 * After the update, the vma will be reinserted using
536 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_pre_update_vma() argument
540 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_pre_update_vma()
545 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) anon_vma_interval_tree_post_update_vma() argument
549 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_post_update_vma()
569 /* Fail if an existing vma overlaps the area */ find_vma_links()
591 struct vm_area_struct *vma; count_vma_pages_range() local
594 vma = find_vma_intersection(mm, addr, end); count_vma_pages_range()
595 if (!vma) count_vma_pages_range()
598 nr_pages = (min(end, vma->vm_end) - count_vma_pages_range()
599 max(addr, vma->vm_start)) >> PAGE_SHIFT; count_vma_pages_range()
602 for (vma = vma->vm_next; vma; vma = vma->vm_next) { count_vma_pages_range()
605 if (vma->vm_start > end) count_vma_pages_range()
608 overlap_len = min(end, vma->vm_end) - vma->vm_start; count_vma_pages_range()
615 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, __vma_link_rb() argument
618 /* Update tracking information for the gap following the new vma. */ __vma_link_rb()
619 if (vma->vm_next) __vma_link_rb()
620 vma_gap_update(vma->vm_next); __vma_link_rb()
622 mm->highest_vm_end = vma->vm_end; __vma_link_rb()
625 * vma->vm_prev wasn't known when we followed the rbtree to find the __vma_link_rb()
626 * correct insertion point for that vma. As a result, we could not __vma_link_rb()
627 * update the vma vm_rb parents rb_subtree_gap values on the way down. __vma_link_rb()
628 * So, we first insert the vma with a zero rb_subtree_gap value __vma_link_rb()
633 rb_link_node(&vma->vm_rb, rb_parent, rb_link); __vma_link_rb()
634 vma->rb_subtree_gap = 0; __vma_link_rb()
635 vma_gap_update(vma); __vma_link_rb()
636 vma_rb_insert(vma, &mm->mm_rb); __vma_link_rb()
639 static void __vma_link_file(struct vm_area_struct *vma) __vma_link_file() argument
643 file = vma->vm_file; __vma_link_file()
647 if (vma->vm_flags & VM_DENYWRITE) __vma_link_file()
649 if (vma->vm_flags & VM_SHARED) __vma_link_file()
653 vma_interval_tree_insert(vma, &mapping->i_mmap); __vma_link_file()
659 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, __vma_link() argument
663 __vma_link_list(mm, vma, prev, rb_parent); __vma_link()
664 __vma_link_rb(mm, vma, rb_link, rb_parent); __vma_link()
667 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, vma_link() argument
673 if (vma->vm_file) { vma_link()
674 mapping = vma->vm_file->f_mapping; vma_link()
678 __vma_link(mm, vma, prev, rb_link, rb_parent); vma_link()
679 __vma_link_file(vma); vma_link()
689 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
692 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) __insert_vm_struct() argument
697 if (find_vma_links(mm, vma->vm_start, vma->vm_end, __insert_vm_struct()
700 __vma_link(mm, vma, prev, rb_link, rb_parent); __insert_vm_struct()
705 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, __vma_unlink() argument
710 vma_rb_erase(vma, &mm->mm_rb); __vma_unlink()
711 prev->vm_next = next = vma->vm_next; __vma_unlink()
720 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
723 * are necessary. The "insert" vma (if any) is to be inserted
726 int vma_adjust(struct vm_area_struct *vma, unsigned long start, vma_adjust() argument
729 struct mm_struct *mm = vma->vm_mm; vma_adjust()
730 struct vm_area_struct *next = vma->vm_next; vma_adjust()
735 struct file *file = vma->vm_file; vma_adjust()
745 * vma expands, overlapping all the next, and vma_adjust()
751 importer = vma; vma_adjust()
754 * vma expands, overlapping part of the next: vma_adjust()
759 importer = vma; vma_adjust()
760 } else if (end < vma->vm_end) { vma_adjust()
762 * vma shrinks, and !insert tells it's not vma_adjust()
766 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); vma_adjust()
767 exporter = vma; vma_adjust()
773 * make sure the expanding vma has anon_vma set if the vma_adjust()
774 * shrinking vma had, to cover any anon pages imported. vma_adjust()
789 uprobe_munmap(vma, vma->vm_start, vma->vm_end); vma_adjust()
800 * space until vma start or end is updated. vma_adjust()
806 vma_adjust_trans_huge(vma, start, end, adjust_next); vma_adjust()
808 anon_vma = vma->anon_vma; vma_adjust()
815 anon_vma_interval_tree_pre_update_vma(vma); vma_adjust()
822 vma_interval_tree_remove(vma, root); vma_adjust()
827 if (start != vma->vm_start) { vma_adjust()
828 vma->vm_start = start; vma_adjust()
831 if (end != vma->vm_end) { vma_adjust()
832 vma->vm_end = end; vma_adjust()
835 vma->vm_pgoff = pgoff; vma_adjust()
844 vma_interval_tree_insert(vma, root); vma_adjust()
850 * vma_merge has merged next into vma, and needs vma_adjust()
853 __vma_unlink(mm, next, vma); vma_adjust()
858 * split_vma has split insert from vma, and needs vma_adjust()
860 * (it may either follow vma or precede it). vma_adjust()
865 vma_gap_update(vma); vma_adjust()
875 anon_vma_interval_tree_post_update_vma(vma); vma_adjust()
884 uprobe_mmap(vma); vma_adjust()
896 anon_vma_merge(vma, next); vma_adjust()
905 next = vma->vm_next; vma_adjust()
922 * If the vma has a ->close operation then the driver probably needs to release
923 * per-vma resources, so we don't attempt to merge those.
925 static inline int is_mergeable_vma(struct vm_area_struct *vma, is_mergeable_vma() argument
936 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) is_mergeable_vma()
938 if (vma->vm_file != file) is_mergeable_vma()
940 if (vma->vm_ops && vma->vm_ops->close) is_mergeable_vma()
947 struct vm_area_struct *vma) is_mergeable_anon_vma()
953 if ((!anon_vma1 || !anon_vma2) && (!vma || is_mergeable_anon_vma()
954 list_is_singular(&vma->anon_vma_chain))) is_mergeable_anon_vma()
961 * in front of (at a lower virtual address and file offset than) the vma.
971 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, can_vma_merge_before() argument
974 if (is_mergeable_vma(vma, file, vm_flags) && can_vma_merge_before()
975 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_before()
976 if (vma->vm_pgoff == vm_pgoff) can_vma_merge_before()
984 * beyond (at a higher virtual address and file offset than) the vma.
990 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, can_vma_merge_after() argument
993 if (is_mergeable_vma(vma, file, vm_flags) && can_vma_merge_after()
994 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_after()
996 vm_pglen = vma_pages(vma); can_vma_merge_after()
997 if (vma->vm_pgoff + vm_pglen == vm_pgoff) can_vma_merge_after()
1017 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1043 * We later require that vma->vm_flags == vm_flags, vma_merge()
1044 * so this tests vma->vm_flags & VM_SPECIAL, too. vma_merge()
1115 * we can merge the two vma's. For example, we refuse to merge a vma if
1131 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1147 * We also make sure that the two vma's are compatible (adjacent,
1167 * anon_vmas being allocated, preventing vma merge in subsequent
1170 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) find_mergeable_anon_vma() argument
1175 near = vma->vm_next; find_mergeable_anon_vma()
1179 anon_vma = reusable_anon_vma(near, vma, near); find_mergeable_anon_vma()
1183 near = vma->vm_prev; find_mergeable_anon_vma()
1187 anon_vma = reusable_anon_vma(near, near, vma); find_mergeable_anon_vma()
1483 int vma_wants_writenotify(struct vm_area_struct *vma) vma_wants_writenotify() argument
1485 vm_flags_t vm_flags = vma->vm_flags; vma_wants_writenotify()
1492 if (vma->vm_ops && vma->vm_ops->page_mkwrite) vma_wants_writenotify()
1497 if (pgprot_val(vma->vm_page_prot) != vma_wants_writenotify()
1498 pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags))) vma_wants_writenotify()
1510 return vma->vm_file && vma->vm_file->f_mapping && vma_wants_writenotify()
1511 mapping_cap_account_dirty(vma->vm_file->f_mapping); vma_wants_writenotify()
1534 struct vm_area_struct *vma, *prev; mmap_region() local
1577 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, mmap_region()
1579 if (vma) mmap_region()
1587 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); mmap_region()
1588 if (!vma) { mmap_region()
1593 vma->vm_mm = mm; mmap_region()
1594 vma->vm_start = addr; mmap_region()
1595 vma->vm_end = addr + len; mmap_region()
1596 vma->vm_flags = vm_flags; mmap_region()
1597 vma->vm_page_prot = vm_get_page_prot(vm_flags); mmap_region()
1598 vma->vm_pgoff = pgoff; mmap_region()
1599 INIT_LIST_HEAD(&vma->anon_vma_chain); mmap_region()
1613 /* ->mmap() can change vma->vm_file, but must guarantee that mmap_region()
1618 vma->vm_file = get_file(file); mmap_region()
1619 error = file->f_op->mmap(file, vma); mmap_region()
1630 WARN_ON_ONCE(addr != vma->vm_start); mmap_region()
1632 addr = vma->vm_start; mmap_region()
1633 vm_flags = vma->vm_flags; mmap_region()
1635 error = shmem_zero_setup(vma); mmap_region()
1640 vma_link(mm, vma, prev, rb_link, rb_parent); mmap_region()
1641 /* Once vma denies write, undo our temporary denial count */ mmap_region()
1648 file = vma->vm_file; mmap_region()
1650 perf_event_mmap(vma); mmap_region()
1654 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || mmap_region()
1655 vma == get_gate_vma(current->mm))) mmap_region()
1658 vma->vm_flags &= ~VM_LOCKED; mmap_region()
1662 uprobe_mmap(vma); mmap_region()
1665 * New (or expanded) vma always get soft dirty status. mmap_region()
1667 * be able to distinguish situation when vma area unmapped, mmap_region()
1671 vma->vm_flags |= VM_SOFTDIRTY; mmap_region()
1673 vma_set_page_prot(vma); mmap_region()
1678 vma->vm_file = NULL; mmap_region()
1682 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); mmap_region()
1690 kmem_cache_free(vm_area_cachep, vma); mmap_region()
1702 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; unmapped_area()
1703 * - gap_end = vma->vm_start >= info->low_limit + length; unmapped_area()
1708 struct vm_area_struct *vma; unmapped_area() local
1728 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); unmapped_area()
1729 if (vma->rb_subtree_gap < length) unmapped_area()
1734 gap_end = vma->vm_start; unmapped_area()
1735 if (gap_end >= low_limit && vma->vm_rb.rb_left) { unmapped_area()
1737 rb_entry(vma->vm_rb.rb_left, unmapped_area()
1740 vma = left; unmapped_area()
1745 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; unmapped_area()
1754 if (vma->vm_rb.rb_right) { unmapped_area()
1756 rb_entry(vma->vm_rb.rb_right, unmapped_area()
1759 vma = right; unmapped_area()
1766 struct rb_node *prev = &vma->vm_rb; unmapped_area()
1769 vma = rb_entry(rb_parent(prev), unmapped_area()
1771 if (prev == vma->vm_rb.rb_left) { unmapped_area()
1772 gap_start = vma->vm_prev->vm_end; unmapped_area()
1773 gap_end = vma->vm_start; unmapped_area()
1802 struct vm_area_struct *vma; unmapped_area_topdown() local
1831 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); unmapped_area_topdown()
1832 if (vma->rb_subtree_gap < length) unmapped_area_topdown()
1837 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; unmapped_area_topdown()
1838 if (gap_start <= high_limit && vma->vm_rb.rb_right) { unmapped_area_topdown()
1840 rb_entry(vma->vm_rb.rb_right, unmapped_area_topdown()
1843 vma = right; unmapped_area_topdown()
1850 gap_end = vma->vm_start; unmapped_area_topdown()
1857 if (vma->vm_rb.rb_left) { unmapped_area_topdown()
1859 rb_entry(vma->vm_rb.rb_left, unmapped_area_topdown()
1862 vma = left; unmapped_area_topdown()
1869 struct rb_node *prev = &vma->vm_rb; unmapped_area_topdown()
1872 vma = rb_entry(rb_parent(prev), unmapped_area_topdown()
1874 if (prev == vma->vm_rb.rb_right) { unmapped_area_topdown()
1875 gap_start = vma->vm_prev ? unmapped_area_topdown()
1876 vma->vm_prev->vm_end : 0; unmapped_area_topdown()
1914 struct vm_area_struct *vma; arch_get_unmapped_area() local
1925 vma = find_vma(mm, addr); arch_get_unmapped_area()
1927 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
1950 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
1965 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
1967 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
2034 struct vm_area_struct *vma; find_vma() local
2037 vma = vmacache_find(mm, addr); find_vma()
2038 if (likely(vma)) find_vma()
2039 return vma; find_vma()
2042 vma = NULL; find_vma()
2050 vma = tmp; find_vma()
2058 if (vma) find_vma()
2059 vmacache_update(addr, vma); find_vma()
2060 return vma; find_vma()
2072 struct vm_area_struct *vma; find_vma_prev() local
2074 vma = find_vma(mm, addr); find_vma_prev()
2075 if (vma) { find_vma_prev()
2076 *pprev = vma->vm_prev; find_vma_prev()
2085 return vma; find_vma_prev()
2093 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) acct_stack_growth() argument
2095 struct mm_struct *mm = vma->vm_mm; acct_stack_growth()
2105 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) acct_stack_growth()
2111 if (vma->vm_flags & VM_LOCKED) { acct_stack_growth()
2122 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : acct_stack_growth()
2123 vma->vm_end - size; acct_stack_growth()
2124 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) acct_stack_growth()
2135 if (vma->vm_flags & VM_LOCKED) acct_stack_growth()
2137 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); acct_stack_growth()
2144 * vma is the last one with address > vma->vm_end. Have to extend vma.
2146 int expand_upwards(struct vm_area_struct *vma, unsigned long address) expand_upwards() argument
2150 if (!(vma->vm_flags & VM_GROWSUP)) expand_upwards()
2160 if (unlikely(anon_vma_prepare(vma))) expand_upwards()
2164 * vma->vm_start/vm_end cannot change under us because the caller expand_upwards()
2168 anon_vma_lock_write(vma->anon_vma); expand_upwards()
2171 if (address > vma->vm_end) { expand_upwards()
2174 size = address - vma->vm_start; expand_upwards()
2175 grow = (address - vma->vm_end) >> PAGE_SHIFT; expand_upwards()
2178 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { expand_upwards()
2179 error = acct_stack_growth(vma, size, grow); expand_upwards()
2185 * concurrent vma expansions. expand_upwards()
2188 * in a mm share the same root anon vma. expand_upwards()
2190 * against concurrent vma expansions. expand_upwards()
2192 spin_lock(&vma->vm_mm->page_table_lock); expand_upwards()
2193 anon_vma_interval_tree_pre_update_vma(vma); expand_upwards()
2194 vma->vm_end = address; expand_upwards()
2195 anon_vma_interval_tree_post_update_vma(vma); expand_upwards()
2196 if (vma->vm_next) expand_upwards()
2197 vma_gap_update(vma->vm_next); expand_upwards()
2199 vma->vm_mm->highest_vm_end = address; expand_upwards()
2200 spin_unlock(&vma->vm_mm->page_table_lock); expand_upwards()
2202 perf_event_mmap(vma); expand_upwards()
2206 anon_vma_unlock_write(vma->anon_vma); expand_upwards()
2207 khugepaged_enter_vma_merge(vma, vma->vm_flags); expand_upwards()
2208 validate_mm(vma->vm_mm); expand_upwards()
2214 * vma is the first one with address < vma->vm_start. Have to extend vma.
2216 int expand_downwards(struct vm_area_struct *vma, expand_downwards() argument
2227 if (unlikely(anon_vma_prepare(vma))) expand_downwards()
2231 * vma->vm_start/vm_end cannot change under us because the caller expand_downwards()
2235 anon_vma_lock_write(vma->anon_vma); expand_downwards()
2238 if (address < vma->vm_start) { expand_downwards()
2241 size = vma->vm_end - address; expand_downwards()
2242 grow = (vma->vm_start - address) >> PAGE_SHIFT; expand_downwards()
2245 if (grow <= vma->vm_pgoff) { expand_downwards()
2246 error = acct_stack_growth(vma, size, grow); expand_downwards()
2252 * concurrent vma expansions. expand_downwards()
2255 * in a mm share the same root anon vma. expand_downwards()
2257 * against concurrent vma expansions. expand_downwards()
2259 spin_lock(&vma->vm_mm->page_table_lock); expand_downwards()
2260 anon_vma_interval_tree_pre_update_vma(vma); expand_downwards()
2261 vma->vm_start = address; expand_downwards()
2262 vma->vm_pgoff -= grow; expand_downwards()
2263 anon_vma_interval_tree_post_update_vma(vma); expand_downwards()
2264 vma_gap_update(vma); expand_downwards()
2265 spin_unlock(&vma->vm_mm->page_table_lock); expand_downwards()
2267 perf_event_mmap(vma); expand_downwards()
2271 anon_vma_unlock_write(vma->anon_vma); expand_downwards()
2272 khugepaged_enter_vma_merge(vma, vma->vm_flags); expand_downwards()
2273 validate_mm(vma->vm_mm); expand_downwards()
2289 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
2294 next = vma->vm_next; expand_stack()
2299 return expand_upwards(vma, address); expand_stack()
2305 struct vm_area_struct *vma, *prev; find_extend_vma() local
2308 vma = find_vma_prev(mm, addr, &prev); find_extend_vma()
2309 if (vma && (vma->vm_start <= addr)) find_extend_vma()
2310 return vma; find_extend_vma()
2318 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
2323 prev = vma->vm_prev; expand_stack()
2328 return expand_downwards(vma, address); expand_stack()
2334 struct vm_area_struct *vma; find_extend_vma() local
2338 vma = find_vma(mm, addr); find_extend_vma()
2339 if (!vma) find_extend_vma()
2341 if (vma->vm_start <= addr) find_extend_vma()
2342 return vma; find_extend_vma()
2343 if (!(vma->vm_flags & VM_GROWSDOWN)) find_extend_vma()
2345 start = vma->vm_start; find_extend_vma()
2346 if (expand_stack(vma, addr)) find_extend_vma()
2348 if (vma->vm_flags & VM_LOCKED) find_extend_vma()
2349 populate_vma_page_range(vma, addr, start, NULL); find_extend_vma()
2350 return vma; find_extend_vma()
2357 * Ok - we have the memory areas we should free on the vma list,
2358 * so release them, and do the vma updates.
2362 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) remove_vma_list() argument
2369 long nrpages = vma_pages(vma); remove_vma_list()
2371 if (vma->vm_flags & VM_ACCOUNT) remove_vma_list()
2373 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); remove_vma_list()
2374 vma = remove_vma(vma); remove_vma_list()
2375 } while (vma); remove_vma_list()
2386 struct vm_area_struct *vma, struct vm_area_struct *prev, unmap_region()
2395 unmap_vmas(&tlb, vma, start, end); unmap_region()
2396 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, unmap_region()
2402 * Create a list of vma's touched by the unmap, removing them from the mm's
2403 * vma list as we go..
2406 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, detach_vmas_to_be_unmapped() argument
2413 vma->vm_prev = NULL; detach_vmas_to_be_unmapped()
2415 vma_rb_erase(vma, &mm->mm_rb); detach_vmas_to_be_unmapped()
2417 tail_vma = vma; detach_vmas_to_be_unmapped()
2418 vma = vma->vm_next; detach_vmas_to_be_unmapped()
2419 } while (vma && vma->vm_start < end); detach_vmas_to_be_unmapped()
2420 *insertion_point = vma; detach_vmas_to_be_unmapped()
2421 if (vma) { detach_vmas_to_be_unmapped()
2422 vma->vm_prev = prev; detach_vmas_to_be_unmapped()
2423 vma_gap_update(vma); detach_vmas_to_be_unmapped()
2436 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, __split_vma() argument
2442 if (is_vm_hugetlb_page(vma) && (addr & __split_vma()
2443 ~(huge_page_mask(hstate_vma(vma))))) __split_vma()
2451 *new = *vma; __split_vma()
2459 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); __split_vma()
2462 err = vma_dup_policy(vma, new); __split_vma()
2466 err = anon_vma_clone(new, vma); __split_vma()
2477 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + __split_vma()
2480 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); __split_vma()
2501 * Split a vma into two pieces at address 'addr', a new vma is allocated
2504 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, split_vma() argument
2510 return __split_vma(mm, vma, addr, new_below); split_vma()
2521 struct vm_area_struct *vma, *prev, *last; do_munmap() local
2531 vma = find_vma(mm, start); do_munmap()
2532 if (!vma) do_munmap()
2534 prev = vma->vm_prev; do_munmap()
2535 /* we have start < vma->vm_end */ do_munmap()
2539 if (vma->vm_start >= end) do_munmap()
2543 * If we need to split any vma, do it now to save pain later. do_munmap()
2547 * places tmp vma above, and higher split_vma places tmp vma below. do_munmap()
2549 if (start > vma->vm_start) { do_munmap()
2557 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) do_munmap()
2560 error = __split_vma(mm, vma, start, 0); do_munmap()
2563 prev = vma; do_munmap()
2573 vma = prev ? prev->vm_next : mm->mmap; do_munmap()
2579 struct vm_area_struct *tmp = vma; do_munmap()
2590 * Remove the vma's, and unmap the actual pages do_munmap()
2592 detach_vmas_to_be_unmapped(mm, vma, prev, end); do_munmap()
2593 unmap_region(mm, vma, prev, start, end); do_munmap()
2595 arch_unmap(mm, vma, start, end); do_munmap()
2598 remove_vma_list(mm, vma); do_munmap()
2630 struct vm_area_struct *vma; SYSCALL_DEFINE5() local
2652 vma = find_vma(mm, start); SYSCALL_DEFINE5()
2654 if (!vma || !(vma->vm_flags & VM_SHARED)) SYSCALL_DEFINE5()
2657 if (start < vma->vm_start) SYSCALL_DEFINE5()
2660 if (start + size > vma->vm_end) { SYSCALL_DEFINE5()
2663 for (next = vma->vm_next; next; next = next->vm_next) { SYSCALL_DEFINE5()
2668 if (next->vm_file != vma->vm_file) SYSCALL_DEFINE5()
2671 if (next->vm_flags != vma->vm_flags) SYSCALL_DEFINE5()
2682 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; SYSCALL_DEFINE5()
2683 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; SYSCALL_DEFINE5()
2684 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; SYSCALL_DEFINE5()
2688 if (vma->vm_flags & VM_LOCKED) { SYSCALL_DEFINE5()
2693 for (tmp = vma; tmp->vm_start >= start + size; SYSCALL_DEFINE5()
2701 file = get_file(vma->vm_file); SYSCALL_DEFINE5()
2702 ret = do_mmap_pgoff(vma->vm_file, start, size, SYSCALL_DEFINE5()
2732 struct vm_area_struct *vma, *prev; do_brk() local
2778 vma = vma_merge(mm, prev, addr, addr + len, flags, do_brk()
2780 if (vma) do_brk()
2784 * create a vma struct for an anonymous mapping do_brk()
2786 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); do_brk()
2787 if (!vma) { do_brk()
2792 INIT_LIST_HEAD(&vma->anon_vma_chain); do_brk()
2793 vma->vm_mm = mm; do_brk()
2794 vma->vm_start = addr; do_brk()
2795 vma->vm_end = addr + len; do_brk()
2796 vma->vm_pgoff = pgoff; do_brk()
2797 vma->vm_flags = flags; do_brk()
2798 vma->vm_page_prot = vm_get_page_prot(flags); do_brk()
2799 vma_link(mm, vma, prev, rb_link, rb_parent); do_brk()
2801 perf_event_mmap(vma); do_brk()
2805 vma->vm_flags |= VM_SOFTDIRTY; do_brk()
2829 struct vm_area_struct *vma; exit_mmap() local
2836 vma = mm->mmap; exit_mmap()
2837 while (vma) { exit_mmap()
2838 if (vma->vm_flags & VM_LOCKED) exit_mmap()
2839 munlock_vma_pages_all(vma); exit_mmap()
2840 vma = vma->vm_next; exit_mmap()
2846 vma = mm->mmap; exit_mmap()
2847 if (!vma) /* Can happen if dup_mmap() received an OOM */ exit_mmap()
2855 unmap_vmas(&tlb, vma, 0, -1); exit_mmap()
2857 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); exit_mmap()
2864 while (vma) { exit_mmap()
2865 if (vma->vm_flags & VM_ACCOUNT) exit_mmap()
2866 nr_accounted += vma_pages(vma); exit_mmap()
2867 vma = remove_vma(vma); exit_mmap()
2876 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) insert_vm_struct() argument
2882 * The vm_pgoff of a purely anonymous vma should be irrelevant insert_vm_struct()
2889 * vma, merges and splits can happen in a seamless way, just insert_vm_struct()
2893 if (!vma->vm_file) { insert_vm_struct()
2894 BUG_ON(vma->anon_vma); insert_vm_struct()
2895 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; insert_vm_struct()
2897 if (find_vma_links(mm, vma->vm_start, vma->vm_end, insert_vm_struct()
2900 if ((vma->vm_flags & VM_ACCOUNT) && insert_vm_struct()
2901 security_vm_enough_memory_mm(mm, vma_pages(vma))) insert_vm_struct()
2904 vma_link(mm, vma, prev, rb_link, rb_parent); insert_vm_struct()
2909 * Copy the vma structure to a new location in the same mm,
2916 struct vm_area_struct *vma = *vmap; copy_vma() local
2917 unsigned long vma_start = vma->vm_start; copy_vma()
2918 struct mm_struct *mm = vma->vm_mm; copy_vma()
2924 * If anonymous vma has not yet been faulted, update new pgoff copy_vma()
2927 if (unlikely(!vma->vm_file && !vma->anon_vma)) { copy_vma()
2934 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, copy_vma()
2935 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); copy_vma()
2938 * Source vma may have been merged into new_vma copy_vma()
2944 * self during an mremap is if the vma hasn't copy_vma()
2946 * reset the dst vma->vm_pgoff to the copy_vma()
2955 *vmap = vma = new_vma; copy_vma()
2957 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); copy_vma()
2961 *new_vma = *vma; copy_vma()
2965 if (vma_dup_policy(vma, new_vma)) copy_vma()
2968 if (anon_vma_clone(new_vma, vma)) copy_vma()
3003 static int special_mapping_fault(struct vm_area_struct *vma,
3007 * Having a close hook prevents vma merging regardless of flags.
3009 static void special_mapping_close(struct vm_area_struct *vma) special_mapping_close() argument
3013 static const char *special_mapping_name(struct vm_area_struct *vma) special_mapping_name() argument
3015 return ((struct vm_special_mapping *)vma->vm_private_data)->name; special_mapping_name()
3029 static int special_mapping_fault(struct vm_area_struct *vma, special_mapping_fault() argument
3041 pgoff = vmf->pgoff - vma->vm_pgoff; special_mapping_fault()
3043 if (vma->vm_ops == &legacy_special_mapping_vmops) special_mapping_fault()
3044 pages = vma->vm_private_data; special_mapping_fault()
3046 pages = ((struct vm_special_mapping *)vma->vm_private_data)-> special_mapping_fault()
3069 struct vm_area_struct *vma; __install_special_mapping() local
3071 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); __install_special_mapping()
3072 if (unlikely(vma == NULL)) __install_special_mapping()
3075 INIT_LIST_HEAD(&vma->anon_vma_chain); __install_special_mapping()
3076 vma->vm_mm = mm; __install_special_mapping()
3077 vma->vm_start = addr; __install_special_mapping()
3078 vma->vm_end = addr + len; __install_special_mapping()
3080 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; __install_special_mapping()
3081 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); __install_special_mapping()
3083 vma->vm_ops = ops; __install_special_mapping()
3084 vma->vm_private_data = priv; __install_special_mapping()
3086 ret = insert_vm_struct(mm, vma); __install_special_mapping()
3092 perf_event_mmap(vma); __install_special_mapping()
3094 return vma; __install_special_mapping()
3097 kmem_cache_free(vm_area_cachep, vma); __install_special_mapping()
3103 * Insert a new vma covering the given region, with the given flags.
3123 struct vm_area_struct *vma = __install_special_mapping( install_special_mapping() local
3127 return PTR_ERR_OR_ZERO(vma); install_special_mapping()
3142 * anon_vma->root->rwsem. If some other vma in this mm shares vm_lock_anon_vma()
3174 * This operation locks against the VM for all pte/vma/mm related
3184 * altering the vma layout. It's also needed in write mode to avoid new
3192 * vma in this mm is backed by the same anon_vma or address_space.
3206 struct vm_area_struct *vma; mm_take_all_locks() local
3213 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_take_all_locks()
3216 if (vma->vm_file && vma->vm_file->f_mapping) mm_take_all_locks()
3217 vm_lock_mapping(mm, vma->vm_file->f_mapping); mm_take_all_locks()
3220 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_take_all_locks()
3223 if (vma->anon_vma) mm_take_all_locks()
3224 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) mm_take_all_locks()
3243 * the vma so the users using the anon_vma->rb_root will vm_unlock_anon_vma()
3277 struct vm_area_struct *vma; mm_drop_all_locks() local
3283 for (vma = mm->mmap; vma; vma = vma->vm_next) { mm_drop_all_locks()
3284 if (vma->anon_vma) mm_drop_all_locks()
3285 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) mm_drop_all_locks()
3287 if (vma->vm_file && vma->vm_file->f_mapping) mm_drop_all_locks()
3288 vm_unlock_mapping(vma->vm_file->f_mapping); mm_drop_all_locks()
945 is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) is_mergeable_anon_vma() argument
2385 unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) unmap_region() argument
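The mmap.c hits above are the central vma bookkeeping: the sorted mm->mmap list and the gap-augmented rbtree behind find_vma() and unmapped_area(), plus the merge/split, stack-expansion and unmap paths that keep them consistent. As a small, hedged sketch of how kernel code typically consumes that state, the function below (its name and purpose are invented for illustration) takes mmap_sem for read and walks the linked list that those hits maintain.

#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

/* Illustrative only: count the pages sitting in VM_LOCKED vmas of an mm. */
static unsigned long count_mlocked_pages(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long locked = 0;

        down_read(&mm->mmap_sem);       /* vma list and rbtree are stable while held */
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                if (vma->vm_flags & VM_LOCKED)
                        locked += vma_pages(vma);   /* (vm_end - vm_start) >> PAGE_SHIFT */
        up_read(&mm->mmap_sem);

        return locked;
}

find_vma() above is the point-lookup counterpart over the same rbtree (with the vmacache probe in front, see the vmacache.c hits below) and relies on the same mmap_sem protection.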
H A Dpgtable-generic.c47 int ptep_set_access_flags(struct vm_area_struct *vma, ptep_set_access_flags() argument
53 set_pte_at(vma->vm_mm, address, ptep, entry); ptep_set_access_flags()
54 flush_tlb_fix_spurious_fault(vma, address); ptep_set_access_flags()
61 int pmdp_set_access_flags(struct vm_area_struct *vma, pmdp_set_access_flags() argument
69 set_pmd_at(vma->vm_mm, address, pmdp, entry); pmdp_set_access_flags()
70 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_set_access_flags()
81 int ptep_clear_flush_young(struct vm_area_struct *vma, ptep_clear_flush_young() argument
85 young = ptep_test_and_clear_young(vma, address, ptep); ptep_clear_flush_young()
87 flush_tlb_page(vma, address); ptep_clear_flush_young()
93 int pmdp_clear_flush_young(struct vm_area_struct *vma, pmdp_clear_flush_young() argument
102 young = pmdp_test_and_clear_young(vma, address, pmdp); pmdp_clear_flush_young()
104 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_clear_flush_young()
110 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, ptep_clear_flush() argument
113 struct mm_struct *mm = (vma)->vm_mm; ptep_clear_flush()
117 flush_tlb_page(vma, address); ptep_clear_flush()
124 pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmdp_clear_flush() argument
129 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp); pmdp_clear_flush()
130 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_clear_flush()
138 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmdp_splitting_flush() argument
143 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
145 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_splitting_flush()
192 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmdp_invalidate() argument
196 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry)); pmdp_invalidate()
197 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); pmdp_invalidate()
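The pgtable-generic.c hits follow one rule throughout: modify the pte/pmd first, then flush the TLB for the affected address or range so no CPU keeps using a stale translation. The sketch below shows the same clear-then-flush pattern at a call site; the helper name is invented, and the real tree does the equivalent inside page_mkclean_one() (see the rmap.c hits further down), with the pte lock held by the caller.

#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Illustrative only: write-protect one present pte; the caller holds the pte lock. */
static void wrprotect_one_pte(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        entry = ptep_clear_flush(vma, addr, ptep);      /* clear the pte, flush the TLB */
        entry = pte_wrprotect(entry);
        set_pte_at(vma->vm_mm, addr, ptep, entry);
}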
H A Dnommu.c137 struct vm_area_struct *vma; kobjsize() local
139 vma = find_vma(current->mm, (unsigned long)objp); kobjsize()
140 if (vma) kobjsize()
141 return vma->vm_end - vma->vm_start; kobjsize()
156 struct vm_area_struct *vma; __get_user_pages() local
169 vma = find_vma(mm, start); __get_user_pages()
170 if (!vma) __get_user_pages()
174 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || __get_user_pages()
175 !(vm_flags & vma->vm_flags)) __get_user_pages()
184 vmas[i] = vma; __get_user_pages()
253 * @vma: memory mapping
261 int follow_pfn(struct vm_area_struct *vma, unsigned long address, follow_pfn() argument
264 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_pfn()
297 struct vm_area_struct *vma; vmalloc_user() local
300 vma = find_vma(current->mm, (unsigned long)ret); vmalloc_user()
301 if (vma) vmalloc_user()
302 vma->vm_flags |= VM_USERMAP; vmalloc_user()
531 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, vm_insert_page() argument
720 * update protection on a vma
722 static void protect_vma(struct vm_area_struct *vma, unsigned long flags) protect_vma() argument
725 struct mm_struct *mm = vma->vm_mm; protect_vma()
726 long start = vma->vm_start & PAGE_MASK; protect_vma()
727 while (start < vma->vm_end) { protect_vma()
741 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) add_vma_to_mm() argument
747 kenter(",%p", vma); add_vma_to_mm()
749 BUG_ON(!vma->vm_region); add_vma_to_mm()
752 vma->vm_mm = mm; add_vma_to_mm()
754 protect_vma(vma, vma->vm_flags); add_vma_to_mm()
757 if (vma->vm_file) { add_vma_to_mm()
758 mapping = vma->vm_file->f_mapping; add_vma_to_mm()
762 vma_interval_tree_insert(vma, &mapping->i_mmap); add_vma_to_mm()
776 if (vma->vm_start < pvma->vm_start) add_vma_to_mm()
778 else if (vma->vm_start > pvma->vm_start) { add_vma_to_mm()
781 } else if (vma->vm_end < pvma->vm_end) add_vma_to_mm()
783 else if (vma->vm_end > pvma->vm_end) { add_vma_to_mm()
786 } else if (vma < pvma) add_vma_to_mm()
788 else if (vma > pvma) { add_vma_to_mm()
795 rb_link_node(&vma->vm_rb, parent, p); add_vma_to_mm()
796 rb_insert_color(&vma->vm_rb, &mm->mm_rb); add_vma_to_mm()
803 __vma_link_list(mm, vma, prev, parent); add_vma_to_mm()
809 static void delete_vma_from_mm(struct vm_area_struct *vma) delete_vma_from_mm() argument
813 struct mm_struct *mm = vma->vm_mm; delete_vma_from_mm()
816 kenter("%p", vma); delete_vma_from_mm()
818 protect_vma(vma, 0); delete_vma_from_mm()
822 /* if the vma is cached, invalidate the entire cache */ delete_vma_from_mm()
823 if (curr->vmacache[i] == vma) { delete_vma_from_mm()
830 if (vma->vm_file) { delete_vma_from_mm()
831 mapping = vma->vm_file->f_mapping; delete_vma_from_mm()
835 vma_interval_tree_remove(vma, &mapping->i_mmap); delete_vma_from_mm()
841 rb_erase(&vma->vm_rb, &mm->mm_rb); delete_vma_from_mm()
843 if (vma->vm_prev) delete_vma_from_mm()
844 vma->vm_prev->vm_next = vma->vm_next; delete_vma_from_mm()
846 mm->mmap = vma->vm_next; delete_vma_from_mm()
848 if (vma->vm_next) delete_vma_from_mm()
849 vma->vm_next->vm_prev = vma->vm_prev; delete_vma_from_mm()
855 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) delete_vma() argument
857 kenter("%p", vma); delete_vma()
858 if (vma->vm_ops && vma->vm_ops->close) delete_vma()
859 vma->vm_ops->close(vma); delete_vma()
860 if (vma->vm_file) delete_vma()
861 fput(vma->vm_file); delete_vma()
862 put_nommu_region(vma->vm_region); delete_vma()
863 kmem_cache_free(vm_area_cachep, vma); delete_vma()
872 struct vm_area_struct *vma; find_vma() local
875 vma = vmacache_find(mm, addr); find_vma()
876 if (likely(vma)) find_vma()
877 return vma; find_vma()
881 for (vma = mm->mmap; vma; vma = vma->vm_next) { find_vma()
882 if (vma->vm_start > addr) find_vma()
884 if (vma->vm_end > addr) { find_vma()
885 vmacache_update(addr, vma); find_vma()
886 return vma; find_vma()
907 int expand_stack(struct vm_area_struct *vma, unsigned long address) expand_stack() argument
920 struct vm_area_struct *vma; find_vma_exact() local
924 vma = vmacache_find_exact(mm, addr, end); find_vma_exact()
925 if (vma) find_vma_exact()
926 return vma; find_vma_exact()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { find_vma_exact()
931 if (vma->vm_start < addr) find_vma_exact()
933 if (vma->vm_start > addr) find_vma_exact()
935 if (vma->vm_end == end) { find_vma_exact()
936 vmacache_update(addr, vma); find_vma_exact()
937 return vma; find_vma_exact()
1152 static int do_mmap_shared_file(struct vm_area_struct *vma) do_mmap_shared_file() argument
1156 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); do_mmap_shared_file()
1158 vma->vm_region->vm_top = vma->vm_region->vm_end; do_mmap_shared_file()
1173 static int do_mmap_private(struct vm_area_struct *vma, do_mmap_private() argument
1187 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); do_mmap_private()
1190 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); do_mmap_private()
1191 vma->vm_region->vm_top = vma->vm_region->vm_end; do_mmap_private()
1225 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; do_mmap_private()
1230 vma->vm_start = region->vm_start; do_mmap_private()
1231 vma->vm_end = region->vm_start + len; do_mmap_private()
1233 if (vma->vm_file) { do_mmap_private()
1238 fpos = vma->vm_pgoff; do_mmap_private()
1243 ret = __vfs_read(vma->vm_file, base, len, &fpos); do_mmap_private()
1259 region->vm_start = vma->vm_start = 0; do_mmap_private()
1260 region->vm_end = vma->vm_end = 0; do_mmap_private()
1282 struct vm_area_struct *vma; do_mmap_pgoff() local
1314 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); do_mmap_pgoff()
1315 if (!vma) do_mmap_pgoff()
1322 INIT_LIST_HEAD(&vma->anon_vma_chain); do_mmap_pgoff()
1323 vma->vm_flags = vm_flags; do_mmap_pgoff()
1324 vma->vm_pgoff = pgoff; do_mmap_pgoff()
1328 vma->vm_file = get_file(file); do_mmap_pgoff()
1380 vma->vm_region = pregion; do_mmap_pgoff()
1383 vma->vm_start = start; do_mmap_pgoff()
1384 vma->vm_end = start + len; do_mmap_pgoff()
1388 vma->vm_flags |= VM_MAPPED_COPY; do_mmap_pgoff()
1391 ret = do_mmap_shared_file(vma); do_mmap_pgoff()
1393 vma->vm_region = NULL; do_mmap_pgoff()
1394 vma->vm_start = 0; do_mmap_pgoff()
1395 vma->vm_end = 0; do_mmap_pgoff()
1429 vma->vm_start = region->vm_start = addr; do_mmap_pgoff()
1430 vma->vm_end = region->vm_end = addr + len; do_mmap_pgoff()
1435 vma->vm_region = region; do_mmap_pgoff()
1440 if (file && vma->vm_flags & VM_SHARED) do_mmap_pgoff()
1441 ret = do_mmap_shared_file(vma); do_mmap_pgoff()
1443 ret = do_mmap_private(vma, region, len, capabilities); do_mmap_pgoff()
1449 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) do_mmap_pgoff()
1454 result = vma->vm_start; do_mmap_pgoff()
1459 add_vma_to_mm(current->mm, vma); do_mmap_pgoff()
1463 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { do_mmap_pgoff()
1479 if (vma->vm_file) do_mmap_pgoff()
1480 fput(vma->vm_file); do_mmap_pgoff()
1481 kmem_cache_free(vm_area_cachep, vma); do_mmap_pgoff()
1493 printk(KERN_WARNING "Allocation of vma for %lu byte allocation" do_mmap_pgoff()
1556 * split a vma into two pieces at address 'addr', a new vma is allocated either
1559 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, split_vma() argument
1570 if (vma->vm_file) split_vma()
1587 *new = *vma; split_vma()
1588 *region = *vma->vm_region; split_vma()
1591 npages = (addr - vma->vm_start) >> PAGE_SHIFT; split_vma()
1603 delete_vma_from_mm(vma); split_vma()
1605 delete_nommu_region(vma->vm_region); split_vma()
1607 vma->vm_region->vm_start = vma->vm_start = addr; split_vma()
1608 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; split_vma()
1610 vma->vm_region->vm_end = vma->vm_end = addr; split_vma()
1611 vma->vm_region->vm_top = addr; split_vma()
1613 add_nommu_region(vma->vm_region); split_vma()
1616 add_vma_to_mm(mm, vma); split_vma()
1626 struct vm_area_struct *vma, shrink_vma()
1635 delete_vma_from_mm(vma); shrink_vma()
1636 if (from > vma->vm_start) shrink_vma()
1637 vma->vm_end = from; shrink_vma()
1639 vma->vm_start = to; shrink_vma()
1640 add_vma_to_mm(mm, vma); shrink_vma()
1643 region = vma->vm_region; shrink_vma()
1668 struct vm_area_struct *vma; do_munmap() local
1681 vma = find_vma(mm, start); do_munmap()
1682 if (!vma) { do_munmap()
1696 if (vma->vm_file) { do_munmap()
1698 if (start > vma->vm_start) { do_munmap()
1702 if (end == vma->vm_end) do_munmap()
1704 vma = vma->vm_next; do_munmap()
1705 } while (vma); do_munmap()
1710 if (start == vma->vm_start && end == vma->vm_end) do_munmap()
1712 if (start < vma->vm_start || end > vma->vm_end) { do_munmap()
1720 if (end != vma->vm_end && end & ~PAGE_MASK) { do_munmap()
1724 if (start != vma->vm_start && end != vma->vm_end) { do_munmap()
1725 ret = split_vma(mm, vma, start, 1); do_munmap()
1731 return shrink_vma(mm, vma, start, end); do_munmap()
1735 delete_vma_from_mm(vma); do_munmap()
1736 delete_vma(mm, vma); do_munmap()
1764 struct vm_area_struct *vma; exit_mmap() local
1773 while ((vma = mm->mmap)) { exit_mmap()
1774 mm->mmap = vma->vm_next; exit_mmap()
1775 delete_vma_from_mm(vma); exit_mmap()
1776 delete_vma(mm, vma); exit_mmap()
1802 struct vm_area_struct *vma; do_mremap() local
1816 vma = find_vma_exact(current->mm, addr, old_len); do_mremap()
1817 if (!vma) do_mremap()
1820 if (vma->vm_end != vma->vm_start + old_len) do_mremap()
1823 if (vma->vm_flags & VM_MAYSHARE) do_mremap()
1826 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) do_mremap()
1830 vma->vm_end = vma->vm_start + new_len; do_mremap()
1831 return vma->vm_start; do_mremap()
1846 struct page *follow_page_mask(struct vm_area_struct *vma, follow_page_mask() argument
1854 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, remap_pfn_range() argument
1860 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; remap_pfn_range()
1865 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) vm_iomap_memory() argument
1868 unsigned long vm_len = vma->vm_end - vma->vm_start; vm_iomap_memory()
1870 pfn += vma->vm_pgoff; vm_iomap_memory()
1871 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); vm_iomap_memory()
1875 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, remap_vmalloc_range() argument
1878 unsigned int size = vma->vm_end - vma->vm_start; remap_vmalloc_range()
1880 if (!(vma->vm_flags & VM_USERMAP)) remap_vmalloc_range()
1883 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); remap_vmalloc_range()
1884 vma->vm_end = vma->vm_start + size; remap_vmalloc_range()
1997 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) filemap_fault() argument
2004 void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) filemap_map_pages() argument
2013 struct vm_area_struct *vma; __access_remote_vm() local
2018 vma = find_vma(mm, addr); __access_remote_vm()
2019 if (vma) { __access_remote_vm()
2021 if (addr + len >= vma->vm_end) __access_remote_vm()
2022 len = vma->vm_end - addr; __access_remote_vm()
2025 if (write && vma->vm_flags & VM_MAYWRITE) __access_remote_vm()
2026 copy_to_user_page(vma, NULL, addr, __access_remote_vm()
2028 else if (!write && vma->vm_flags & VM_MAYREAD) __access_remote_vm()
2029 copy_from_user_page(vma, NULL, addr, __access_remote_vm()
2093 struct vm_area_struct *vma; nommu_shrink_inode_mappings() local
2105 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { nommu_shrink_inode_mappings()
2108 if (vma->vm_flags & VM_SHARED) { nommu_shrink_inode_mappings()
2121 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { nommu_shrink_inode_mappings()
2122 if (!(vma->vm_flags & VM_SHARED)) nommu_shrink_inode_mappings()
2125 region = vma->vm_region; nommu_shrink_inode_mappings()
1625 shrink_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long from, unsigned long to) shrink_vma() argument
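Besides the nommu mapping machinery, the nommu.c hits above include the stubs for remap_pfn_range(), vm_iomap_memory() and remap_vmalloc_range(), which are the entry points drivers call from their ->mmap() handlers. A hedged sketch of the classic caller follows; the device name, base address and buffer size are placeholders invented for the example.

#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t mydev_phys_base;     /* assumption: filled in at probe time */
#define MYDEV_BUF_SIZE  (1UL << 20)     /* assumption: a 1 MiB device buffer */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

        if (off + size < off || off + size > MYDEV_BUF_SIZE)
                return -EINVAL;

        /* Marks the vma VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP,
         * as the stub above spells out. */
        return remap_pfn_range(vma, vma->vm_start,
                               (mydev_phys_base + off) >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

vm_iomap_memory(vma, mydev_phys_base, MYDEV_BUF_SIZE), also visible above, is the convenience wrapper that performs the pgoff/length validation before calling io_remap_pfn_range().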
H A Dvmacache.c9 * Flush vma caches for threads that share a given mm.
12 * exclusively and other threads accessing the vma cache will
14 * is required to maintain the vma cache.
96 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find() local
98 if (!vma) vmacache_find()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) vmacache_find()
102 if (vma->vm_start <= addr && vma->vm_end > addr) { vmacache_find()
104 return vma; vmacache_find()
124 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find_exact() local
126 if (vma && vma->vm_start == start && vma->vm_end == end) { vmacache_find_exact()
128 return vma; vmacache_find_exact()
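The vmacache.c hits show the small per-thread cache that find_vma() (in both the mmap.c and nommu.c blocks above) probes before falling back to the rbtree or list walk: a few current->vmacache slots validated against vm_mm and the [vm_start, vm_end) range, refreshed on a miss via vmacache_update(). The sketch below is a user-space analogue of that probe-then-fallback pattern, not kernel code; every name in it is invented.

#include <stddef.h>

#define CACHE_SLOTS 4                   /* analogue of the vmacache slots */

struct range { unsigned long start, end; };

static __thread struct range *range_cache[CACHE_SLOTS];

static struct range *lookup_range(unsigned long addr,
                                  struct range *(*slow_lookup)(unsigned long))
{
        struct range *r;
        int i;

        for (i = 0; i < CACHE_SLOTS; i++) {
                r = range_cache[i];
                if (r && r->start <= addr && r->end > addr)
                        return r;       /* hit: same range check as vmacache_find() */
        }

        r = slow_lookup(addr);          /* miss: do the expensive lookup */
        if (r)
                range_cache[addr % CACHE_SLOTS] = r;
        return r;
}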
H A Drmap.c75 anon_vma->degree = 1; /* Reference for first vma */ anon_vma_alloc()
127 static void anon_vma_chain_link(struct vm_area_struct *vma, anon_vma_chain_link() argument
131 avc->vma = vma; anon_vma_chain_link()
133 list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_chain_link()
139 * @vma: the memory region in question
141 * This makes sure the memory mapping described by 'vma' has
148 * reason for splitting a vma has been mprotect()), or we
151 * Anon-vma allocations are very subtle, because we may have
154 * allocated vma (it depends on RCU to make sure that the
164 int anon_vma_prepare(struct vm_area_struct *vma) anon_vma_prepare() argument
166 struct anon_vma *anon_vma = vma->anon_vma; anon_vma_prepare()
171 struct mm_struct *mm = vma->vm_mm; anon_vma_prepare()
178 anon_vma = find_mergeable_anon_vma(vma); anon_vma_prepare()
190 if (likely(!vma->anon_vma)) { anon_vma_prepare()
191 vma->anon_vma = anon_vma; anon_vma_prepare()
192 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_prepare()
193 /* vma reference or self-parent link for new root */ anon_vma_prepare()
216 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
217 * have the same vma.
248 * child isn't reused even if there was no alive vma, thus rmap walker has a
274 * that means it has no vma and only one anon_vma child. anon_vma_clone()
302 * Attach vma to its own anon_vma, as well as to the anon_vmas that
306 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) anon_vma_fork() argument
317 vma->anon_vma = NULL; anon_vma_fork()
323 error = anon_vma_clone(vma, pvma); anon_vma_fork()
328 if (vma->anon_vma) anon_vma_fork()
352 vma->anon_vma = anon_vma; anon_vma_fork()
354 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_fork()
363 unlink_anon_vmas(vma); anon_vma_fork()
367 void unlink_anon_vmas(struct vm_area_struct *vma) unlink_anon_vmas() argument
376 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { unlink_anon_vmas()
394 if (vma->anon_vma) unlink_anon_vmas()
395 vma->anon_vma->degree--; unlink_anon_vmas()
403 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { unlink_anon_vmas()
564 * At what user virtual address is page expected in @vma?
567 __vma_address(struct page *page, struct vm_area_struct *vma) __vma_address() argument
570 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); __vma_address()
574 vma_address(struct page *page, struct vm_area_struct *vma) vma_address() argument
576 unsigned long address = __vma_address(page, vma); vma_address()
578 /* page should be within @vma mapping range */ vma_address()
579 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); vma_address()
585 * At what user virtual address is page expected in vma?
586 * Caller should check the page is actually part of the vma.
588 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) page_address_in_vma() argument
597 if (!vma->anon_vma || !page__anon_vma || page_address_in_vma()
598 vma->anon_vma->root != page__anon_vma->root) page_address_in_vma()
601 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) page_address_in_vma()
605 address = __vma_address(page, vma); page_address_in_vma()
606 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) page_address_in_vma()
691 * @vma: the VMA to test
697 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) page_mapped_in_vma() argument
703 address = __vma_address(page, vma); page_mapped_in_vma()
704 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) page_mapped_in_vma()
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); page_mapped_in_vma()
723 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, page_referenced_one() argument
726 struct mm_struct *mm = vma->vm_mm; page_referenced_one()
743 if (vma->vm_flags & VM_LOCKED) { page_referenced_one()
750 if (pmdp_clear_flush_young_notify(vma, address, pmd)) page_referenced_one()
764 if (vma->vm_flags & VM_LOCKED) { page_referenced_one()
770 if (ptep_clear_flush_young_notify(vma, address, pte)) { page_referenced_one()
778 if (likely(!(vma->vm_flags & VM_SEQ_READ))) page_referenced_one()
786 pra->vm_flags |= vma->vm_flags; page_referenced_one()
796 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) invalid_page_referenced_vma() argument
801 if (!mm_match_cgroup(vma->vm_mm, memcg)) invalid_page_referenced_vma()
812 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
865 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, page_mkclean_one() argument
868 struct mm_struct *mm = vma->vm_mm; page_mkclean_one()
881 flush_cache_page(vma, address, pte_pfn(*pte)); page_mkclean_one()
882 entry = ptep_clear_flush(vma, address, pte); page_mkclean_one()
899 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) invalid_mkclean_vma() argument
901 if (vma->vm_flags & VM_SHARED) invalid_mkclean_vma()
935 * @vma: the vma the page belongs to
944 struct vm_area_struct *vma, unsigned long address) page_move_anon_rmap()
946 struct anon_vma *anon_vma = vma->anon_vma; page_move_anon_rmap()
949 VM_BUG_ON_VMA(!anon_vma, vma); page_move_anon_rmap()
950 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); page_move_anon_rmap()
959 * @vma: VM area to add page to.
964 struct vm_area_struct *vma, unsigned long address, int exclusive) __page_set_anon_rmap()
966 struct anon_vma *anon_vma = vma->anon_vma; __page_set_anon_rmap()
974 * If the page isn't exclusively mapped into this vma, __page_set_anon_rmap()
983 page->index = linear_page_index(vma, address); __page_set_anon_rmap()
989 * @vma: the vm area in which the mapping is added
993 struct vm_area_struct *vma, unsigned long address) __page_check_anon_rmap()
1008 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); __page_check_anon_rmap()
1009 BUG_ON(page->index != linear_page_index(vma, address)); __page_check_anon_rmap()
1016 * @vma: the vm area in which the mapping is added
1025 struct vm_area_struct *vma, unsigned long address) page_add_anon_rmap()
1027 do_page_add_anon_rmap(page, vma, address, 0); page_add_anon_rmap()
1036 struct vm_area_struct *vma, unsigned long address, int exclusive) do_page_add_anon_rmap()
1056 /* address might be in next vma when migration races vma_adjust */ do_page_add_anon_rmap()
1058 __page_set_anon_rmap(page, vma, address, exclusive); do_page_add_anon_rmap()
1060 __page_check_anon_rmap(page, vma, address); do_page_add_anon_rmap()
1066 * @vma: the vm area in which the mapping is added
1074 struct vm_area_struct *vma, unsigned long address) page_add_new_anon_rmap()
1076 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); page_add_new_anon_rmap()
1083 __page_set_anon_rmap(page, vma, address, 1); page_add_new_anon_rmap()
1181 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, try_to_unmap_one() argument
1184 struct mm_struct *mm = vma->vm_mm; try_to_unmap_one()
1201 if (vma->vm_flags & VM_LOCKED) try_to_unmap_one()
1208 if (ptep_clear_flush_young_notify(vma, address, pte)) { try_to_unmap_one()
1215 flush_cache_page(vma, address, page_to_pfn(page)); try_to_unmap_one()
1216 pteval = ptep_clear_flush(vma, address, pte); try_to_unmap_one()
1310 if (down_read_trylock(&vma->vm_mm->mmap_sem)) { try_to_unmap_one()
1311 if (vma->vm_flags & VM_LOCKED) { try_to_unmap_one()
1315 up_read(&vma->vm_mm->mmap_sem); try_to_unmap_one()
1320 bool is_vma_temporary_stack(struct vm_area_struct *vma) is_vma_temporary_stack() argument
1322 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); is_vma_temporary_stack()
1327 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == is_vma_temporary_stack()
1334 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) invalid_migration_vma() argument
1336 return is_vma_temporary_stack(vma); invalid_migration_vma()
1398 * SWAP_AGAIN - no vma is holding page mlocked, or,
1399 * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
1457 * Find all the mappings of a page using the mapping pointer and the vma chains
1460 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1462 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1478 struct vm_area_struct *vma = avc->vma; rmap_walk_anon() local
1479 unsigned long address = vma_address(page, vma); rmap_walk_anon()
1481 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) rmap_walk_anon()
1484 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_anon()
1499 * Find all the mappings of a page using the mapping pointer and the vma chains
1502 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1504 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1511 struct vm_area_struct *vma; rmap_walk_file() local
1527 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { rmap_walk_file()
1528 unsigned long address = vma_address(page, vma); rmap_walk_file()
1530 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) rmap_walk_file()
1533 ret = rwc->rmap_one(page, vma, address, rwc->arg); rmap_walk_file()
1562 struct vm_area_struct *vma, unsigned long address, int exclusive) __hugepage_set_anon_rmap()
1564 struct anon_vma *anon_vma = vma->anon_vma; __hugepage_set_anon_rmap()
1575 page->index = linear_page_index(vma, address); __hugepage_set_anon_rmap()
1579 struct vm_area_struct *vma, unsigned long address) hugepage_add_anon_rmap()
1581 struct anon_vma *anon_vma = vma->anon_vma; hugepage_add_anon_rmap()
1586 /* address might be in next vma when migration races vma_adjust */ hugepage_add_anon_rmap()
1589 __hugepage_set_anon_rmap(page, vma, address, 0); hugepage_add_anon_rmap()
1593 struct vm_area_struct *vma, unsigned long address) hugepage_add_new_anon_rmap()
1595 BUG_ON(address < vma->vm_start || address >= vma->vm_end); hugepage_add_new_anon_rmap()
1597 __hugepage_set_anon_rmap(page, vma, address, 1); hugepage_add_new_anon_rmap()
943 page_move_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_move_anon_rmap() argument
963 __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) __page_set_anon_rmap() argument
992 __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) __page_check_anon_rmap() argument
1024 page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_add_anon_rmap() argument
1035 do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) do_page_add_anon_rmap() argument
1073 page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) page_add_new_anon_rmap() argument
1561 __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) __hugepage_set_anon_rmap() argument
1578 hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) hugepage_add_anon_rmap() argument
1592 hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) hugepage_add_new_anon_rmap() argument
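Several of the rmap.c hits reduce a (page, vma) pair to a user virtual address using the arithmetic in __vma_address() above: address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT), with linear_page_index() as its inverse. A worked sketch with made-up numbers follows; the hard-coded 12 stands in for PAGE_SHIFT.

/* vm_start = 0x400000, vm_pgoff = 0x10, page offset in the file = 0x13:
 *   addr  = 0x400000 + ((0x13 - 0x10) << 12) = 0x403000
 *   pgoff = ((0x403000 - 0x400000) >> 12) + 0x10 = 0x13   (round trip)
 */
static unsigned long addr_of_pgoff(unsigned long vm_start, unsigned long vm_pgoff,
                                   unsigned long pgoff)
{
        return vm_start + ((pgoff - vm_pgoff) << 12);   /* __vma_address() arithmetic */
}

static unsigned long pgoff_of_addr(unsigned long vm_start, unsigned long vm_pgoff,
                                   unsigned long addr)
{
        return ((addr - vm_start) >> 12) + vm_pgoff;    /* linear_page_index() arithmetic */
}

rmap_walk_anon() and rmap_walk_file() above compute exactly this vma_address() for each vma the page may be mapped into and pass it to the rmap_one callback.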
H A Dmemory.c528 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, free_pgtables() argument
531 while (vma) { free_pgtables()
532 struct vm_area_struct *next = vma->vm_next; free_pgtables()
533 unsigned long addr = vma->vm_start; free_pgtables()
536 * Hide vma from rmap and truncate_pagecache before freeing free_pgtables()
539 unlink_anon_vmas(vma); free_pgtables()
540 unlink_file_vma(vma); free_pgtables()
542 if (is_vm_hugetlb_page(vma)) { free_pgtables()
543 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, free_pgtables()
549 while (next && next->vm_start <= vma->vm_end + PMD_SIZE free_pgtables()
551 vma = next; free_pgtables()
552 next = vma->vm_next; free_pgtables()
553 unlink_anon_vmas(vma); free_pgtables()
554 unlink_file_vma(vma); free_pgtables()
556 free_pgd_range(tlb, addr, vma->vm_end, free_pgtables()
559 vma = next; free_pgtables()
563 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, __pte_alloc() argument
599 wait_split_huge_page(vma->anon_vma, pmd); __pte_alloc()
646 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, print_bad_pte() argument
649 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); print_bad_pte()
678 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; print_bad_pte()
679 index = linear_page_index(vma, addr); print_bad_pte()
689 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); print_bad_pte()
694 vma->vm_file, print_bad_pte()
695 vma->vm_ops ? vma->vm_ops->fault : NULL, print_bad_pte()
696 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, print_bad_pte()
719 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
723 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
729 * as the vma is not a COW mapping; in that case, we know that all ptes are
749 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, vm_normal_page() argument
757 if (vma->vm_ops && vma->vm_ops->find_special_page) vm_normal_page()
758 return vma->vm_ops->find_special_page(vma, addr); vm_normal_page()
759 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) vm_normal_page()
762 print_bad_pte(vma, addr, pte, NULL); vm_normal_page()
768 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { vm_normal_page()
769 if (vma->vm_flags & VM_MIXEDMAP) { vm_normal_page()
775 off = (addr - vma->vm_start) >> PAGE_SHIFT; vm_normal_page()
776 if (pfn == vma->vm_pgoff + off) vm_normal_page()
778 if (!is_cow_mapping(vma->vm_flags)) vm_normal_page()
787 print_bad_pte(vma, addr, pte, NULL); vm_normal_page()
802 * covered by this vma.
807 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, copy_one_pte()
810 unsigned long vm_flags = vma->vm_flags; copy_one_pte()
872 page = vm_normal_page(vma, addr, pte); copy_one_pte()
888 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, copy_pte_range()
927 vma, addr, rss); copy_pte_range()
951 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, copy_pmd_range()
967 dst_pmd, src_pmd, addr, vma); copy_pmd_range()
977 vma, addr, next)) copy_pmd_range()
984 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, copy_pud_range()
999 vma, addr, next)) copy_pud_range()
1006 struct vm_area_struct *vma) copy_page_range()
1010 unsigned long addr = vma->vm_start; copy_page_range()
1011 unsigned long end = vma->vm_end; copy_page_range()
1023 if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && copy_page_range()
1024 !vma->anon_vma) copy_page_range()
1027 if (is_vm_hugetlb_page(vma)) copy_page_range()
1028 return copy_hugetlb_page_range(dst_mm, src_mm, vma); copy_page_range()
1030 if (unlikely(vma->vm_flags & VM_PFNMAP)) { copy_page_range()
1035 ret = track_pfn_copy(vma); copy_page_range()
1046 is_cow = is_cow_mapping(vma->vm_flags); copy_page_range()
1061 vma, addr, next))) { copy_page_range()
1073 struct vm_area_struct *vma, pmd_t *pmd, zap_pte_range()
1099 page = vm_normal_page(vma, addr, ptent); zap_pte_range()
1123 likely(!(vma->vm_flags & VM_SEQ_READ))) zap_pte_range()
1129 print_bad_pte(vma, addr, ptent, page); zap_pte_range()
1155 print_bad_pte(vma, addr, ptent, NULL); zap_pte_range()
1185 struct vm_area_struct *vma, pud_t *pud, zap_pmd_range()
1199 pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", zap_pmd_range()
1201 vma->vm_start, zap_pmd_range()
1202 vma->vm_end); zap_pmd_range()
1206 split_huge_page_pmd(vma, addr, pmd); zap_pmd_range()
1207 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) zap_pmd_range()
1220 next = zap_pte_range(tlb, vma, pmd, addr, next, details); zap_pmd_range()
1229 struct vm_area_struct *vma, pgd_t *pgd, zap_pud_range()
1241 next = zap_pmd_range(tlb, vma, pud, addr, next, details); zap_pud_range()
1248 struct vm_area_struct *vma, unmap_page_range()
1259 tlb_start_vma(tlb, vma); unmap_page_range()
1260 pgd = pgd_offset(vma->vm_mm, addr); unmap_page_range()
1265 next = zap_pud_range(tlb, vma, pgd, addr, next, details); unmap_page_range()
1267 tlb_end_vma(tlb, vma); unmap_page_range()
1272 struct vm_area_struct *vma, unsigned long start_addr, unmap_single_vma()
1276 unsigned long start = max(vma->vm_start, start_addr); unmap_single_vma()
1279 if (start >= vma->vm_end) unmap_single_vma()
1281 end = min(vma->vm_end, end_addr); unmap_single_vma()
1282 if (end <= vma->vm_start) unmap_single_vma()
1285 if (vma->vm_file) unmap_single_vma()
1286 uprobe_munmap(vma, start, end); unmap_single_vma()
1288 if (unlikely(vma->vm_flags & VM_PFNMAP)) unmap_single_vma()
1289 untrack_pfn(vma, 0, 0); unmap_single_vma()
1292 if (unlikely(is_vm_hugetlb_page(vma))) { unmap_single_vma()
1294 * It is undesirable to test vma->vm_file as it unmap_single_vma()
1299 * mmap_region() nullifies vma->vm_file unmap_single_vma()
1304 if (vma->vm_file) { unmap_single_vma()
1305 i_mmap_lock_write(vma->vm_file->f_mapping); unmap_single_vma()
1306 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); unmap_single_vma()
1307 i_mmap_unlock_write(vma->vm_file->f_mapping); unmap_single_vma()
1310 unmap_page_range(tlb, vma, start, end, details); unmap_single_vma()
1315 * unmap_vmas - unmap a range of memory covered by a list of vma's
1317 * @vma: the starting vma
1321 * Unmap all pages in the vma list.
1333 struct vm_area_struct *vma, unsigned long start_addr, unmap_vmas()
1336 struct mm_struct *mm = vma->vm_mm; unmap_vmas()
1339 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) unmap_vmas()
1340 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); unmap_vmas()
1346 * @vma: vm_area_struct holding the applicable pages
1353 void zap_page_range(struct vm_area_struct *vma, unsigned long start, zap_page_range() argument
1356 struct mm_struct *mm = vma->vm_mm; zap_page_range()
1364 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) zap_page_range()
1365 unmap_single_vma(&tlb, vma, start, end, details); zap_page_range()
1372 * @vma: vm_area_struct holding the applicable pages
1379 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, zap_page_range_single() argument
1382 struct mm_struct *mm = vma->vm_mm; zap_page_range_single()
1390 unmap_single_vma(&tlb, vma, address, end, details); zap_page_range_single()
1396 * zap_vma_ptes - remove ptes mapping the vma
1397 * @vma: vm_area_struct holding ptes to be zapped
1403 * The entire address range must be fully contained within the vma.
1407 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, zap_vma_ptes() argument
1410 if (address < vma->vm_start || address + size > vma->vm_end || zap_vma_ptes()
1411 !(vma->vm_flags & VM_PFNMAP)) zap_vma_ptes()
1413 zap_page_range_single(vma, address, size, NULL); zap_vma_ptes()
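zap_vma_ptes() is the exported helper a driver can call to tear down ptes it previously installed into a VM_PFNMAP mapping, e.g. when revoking access to device memory. A minimal sketch, assuming the driver stashed the vma from its own ->mmap() handler; mydrv_revoke_mapping and that bookkeeping are hypothetical, not part of this file:

#include <linux/mm.h>

/* Hypothetical revoke path: 'vma' was saved when the driver's ->mmap()
 * handler set up a VM_PFNMAP mapping, and the driver now wants the ptes
 * gone before the underlying device memory is reassigned. */
static void mydrv_revoke_mapping(struct vm_area_struct *vma)
{
        int err;

        /* The range must be fully contained in the vma and the vma must be
         * VM_PFNMAP; zap_vma_ptes() enforces both. */
        err = zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
        if (err)
                pr_warn("mydrv: zap_vma_ptes() failed: %d\n", err);
}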
1440 static int insert_page(struct vm_area_struct *vma, unsigned long addr, insert_page() argument
1443 struct mm_struct *mm = vma->vm_mm; insert_page()
1476 * vm_insert_page - insert single page into user vma
1477 * @vma: user vma to map to
1482 * into a user vma.
1491 * that. Your vma protection will have to be set up correctly, which
1498 * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
1499 * Caller must set VM_MIXEDMAP on vma if it wants to call this
1502 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, vm_insert_page() argument
1505 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_page()
1509 if (!(vma->vm_flags & VM_MIXEDMAP)) { vm_insert_page()
1510 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); vm_insert_page()
1511 BUG_ON(vma->vm_flags & VM_PFNMAP); vm_insert_page()
1512 vma->vm_flags |= VM_MIXEDMAP; vm_insert_page()
1514 return insert_page(vma, addr, page, vma->vm_page_prot); vm_insert_page()
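The usual consumer of vm_insert_page() is a driver that allocated order-0 pages itself and maps them from its ->mmap() handler. A hedged sketch; struct mydrv_buf, its page array and mydrv_mmap are hypothetical driver-side names:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device buffer: an array of order-0 pages allocated
 * earlier, e.g. with alloc_page(GFP_KERNEL). */
struct mydrv_buf {
        struct page **pages;
        unsigned long nr_pages;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv_buf *buf = file->private_data;
        unsigned long npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;
        int err;

        if (vma->vm_pgoff || npages > buf->nr_pages)
                return -EINVAL;

        for (i = 0; i < npages; i++) {
                /* The first call flips the vma to VM_MIXEDMAP; ->mmap()
                 * runs under mmap_sem held for write, as required. */
                err = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
                                     buf->pages[i]);
                if (err)
                        return err;
        }
        return 0;
}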
1518 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, insert_pfn() argument
1521 struct mm_struct *mm = vma->vm_mm; insert_pfn()
1537 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ insert_pfn()
1547 * vm_insert_pfn - insert single pfn into user vma
1548 * @vma: user vma to map to
1553 * they've allocated into a user vma. Same comments apply.
1558 * vma cannot be a COW mapping.
1563 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, vm_insert_pfn() argument
1567 pgprot_t pgprot = vma->vm_page_prot; vm_insert_pfn()
1574 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); vm_insert_pfn()
1575 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == vm_insert_pfn()
1577 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); vm_insert_pfn()
1578 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); vm_insert_pfn()
1580 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_pfn()
1582 if (track_pfn_insert(vma, &pgprot, pfn)) vm_insert_pfn()
1585 ret = insert_pfn(vma, addr, pfn, pgprot); vm_insert_pfn()
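vm_insert_pfn() is most often called from the ->fault() handler of a pure VM_PFNMAP mapping. A sketch using the 4.1-era vm_fault layout (virtual_address field); mydrv_fault and mydrv_pfn_for are hypothetical, and the error-to-VM_FAULT mapping follows the common driver convention rather than any single in-tree implementation:

#include <linux/mm.h>

/* Hypothetical driver lookup from file offset to pfn. */
static unsigned long mydrv_pfn_for(struct vm_area_struct *vma, pgoff_t pgoff);

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long addr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = mydrv_pfn_for(vma, vmf->pgoff);
        int err;

        err = vm_insert_pfn(vma, addr, pfn);
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err < 0 && err != -EBUSY)   /* -EBUSY: lost a race, pte exists */
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;         /* pte was installed by hand */
}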
1591 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, vm_insert_mixed() argument
1594 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); vm_insert_mixed()
1596 if (addr < vma->vm_start || addr >= vma->vm_end) vm_insert_mixed()
1610 return insert_page(vma, addr, page, vma->vm_page_prot); vm_insert_mixed()
1612 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); vm_insert_mixed()
1685 * @vma: user vma to map to
1693 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, remap_pfn_range() argument
1699 struct mm_struct *mm = vma->vm_mm; remap_pfn_range()
1711 * Disable vma merging and expanding with mremap(). remap_pfn_range()
1713 * Omit vma from core dump, even when VM_IO turned off. remap_pfn_range()
1717 * un-COW'ed pages by matching them up with "vma->vm_pgoff". remap_pfn_range()
1720 if (is_cow_mapping(vma->vm_flags)) { remap_pfn_range()
1721 if (addr != vma->vm_start || end != vma->vm_end) remap_pfn_range()
1723 vma->vm_pgoff = pfn; remap_pfn_range()
1726 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); remap_pfn_range()
1730 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; remap_pfn_range()
1735 flush_cache_range(vma, addr, end); remap_pfn_range()
1745 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); remap_pfn_range()
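The classic use of remap_pfn_range() is mapping a device's MMIO region from ->mmap(), covering the whole vma as the COW restriction above requires. A sketch; struct mydrv with bar_phys/bar_len is a hypothetical device description:

#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv {
        phys_addr_t bar_phys;           /* hypothetical MMIO base */
        resource_size_t bar_len;        /* hypothetical MMIO length */
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv *dev = file->private_data;
        unsigned long len = vma->vm_end - vma->vm_start;

        if (vma->vm_pgoff || len > dev->bar_len)
                return -EINVAL;

        /* Device registers must not be cached by the CPU. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Map the whole vma in one go; VM_IO/VM_PFNMAP etc. are set here. */
        return remap_pfn_range(vma, vma->vm_start,
                               dev->bar_phys >> PAGE_SHIFT,
                               len, vma->vm_page_prot);
}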
1753 * @vma: user vma to map to
1759 * we'll figure out the rest from the vma information.
1761 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
1764 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) vm_iomap_memory() argument
1783 if (vma->vm_pgoff > pages) vm_iomap_memory()
1785 pfn += vma->vm_pgoff; vm_iomap_memory()
1786 pages -= vma->vm_pgoff; vm_iomap_memory()
1789 vm_len = vma->vm_end - vma->vm_start; vm_iomap_memory()
1794 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); vm_iomap_memory()
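vm_iomap_memory() wraps the offset/length arithmetic above, so the same driver mmap collapses to a near one-liner. A sketch reusing the hypothetical struct mydrv from the previous example:

static int mydrv_mmap_iomem(struct file *file, struct vm_area_struct *vma)
{
        struct mydrv *dev = file->private_data;   /* hypothetical, as above */

        /* As the NOTE above says, tweak vm_page_prot first if needed. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        /* Offset/length validation and the pfn arithmetic are internal. */
        return vm_iomap_memory(vma, dev->bar_phys, dev->bar_len);
}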
1923 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) cow_user_page() argument
1948 copy_user_highpage(dst, src, va, vma); cow_user_page()
1957 static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, do_page_mkwrite() argument
1969 ret = vma->vm_ops->page_mkwrite(vma, &vmf); do_page_mkwrite()
1985 * Handle write page faults for pages that can be reused in the current vma
1993 struct vm_area_struct *vma, unsigned long address,
2008 flush_cache_page(vma, address, pte_pfn(orig_pte)); __releases()
2010 entry = maybe_mkwrite(pte_mkdirty(entry), vma); __releases()
2011 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) __releases()
2012 update_mmu_cache(vma, address, page_table); __releases()
2037 file_update_time(vma->vm_file); __releases()
2059 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, wp_page_copy() argument
2071 if (unlikely(anon_vma_prepare(vma))) wp_page_copy()
2075 new_page = alloc_zeroed_user_highpage_movable(vma, address); wp_page_copy()
2079 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); wp_page_copy()
2082 cow_user_page(new_page, old_page, address, vma); wp_page_copy()
2104 flush_cache_page(vma, address, pte_pfn(orig_pte)); wp_page_copy()
2105 entry = mk_pte(new_page, vma->vm_page_prot); wp_page_copy()
2106 entry = maybe_mkwrite(pte_mkdirty(entry), vma); wp_page_copy()
2113 ptep_clear_flush_notify(vma, address, page_table); wp_page_copy()
2114 page_add_new_anon_rmap(new_page, vma, address); wp_page_copy()
2116 lru_cache_add_active_or_unevictable(new_page, vma); wp_page_copy()
2123 update_mmu_cache(vma, address, page_table); wp_page_copy()
2164 * Don't let another task, with possibly unlocked vma, wp_page_copy()
2167 if (page_copied && (vma->vm_flags & VM_LOCKED)) { wp_page_copy()
2188 struct vm_area_struct *vma, unsigned long address, wp_pfn_shared()
2192 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { wp_pfn_shared()
2195 .pgoff = linear_page_index(vma, address), wp_pfn_shared()
2202 ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); wp_pfn_shared()
2215 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, wp_pfn_shared()
2219 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
2234 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { __releases()
2238 tmp = do_page_mkwrite(vma, old_page, address); __releases()
2261 return wp_page_reuse(mm, vma, address, page_table, ptl, __releases()
2279 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2283 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
2290 old_page = vm_normal_page(vma, address, orig_pte); __releases()
2299 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == __releases()
2301 return wp_pfn_shared(mm, vma, address, page_table, ptl, __releases()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2334 page_move_anon_rmap(old_page, vma, address); __releases()
2336 return wp_page_reuse(mm, vma, address, page_table, ptl, __releases()
2340 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == __releases()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, __releases()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, __releases()
2356 static void unmap_mapping_range_vma(struct vm_area_struct *vma, unmap_mapping_range_vma() argument
2360 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); unmap_mapping_range_vma()
2366 struct vm_area_struct *vma; unmap_mapping_range_tree() local
2369 vma_interval_tree_foreach(vma, root, unmap_mapping_range_tree()
2372 vba = vma->vm_pgoff; unmap_mapping_range_tree()
2373 vea = vba + vma_pages(vma) - 1; unmap_mapping_range_tree()
2382 unmap_mapping_range_vma(vma, unmap_mapping_range_tree()
2383 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, unmap_mapping_range_tree()
2384 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, unmap_mapping_range_tree()
2437 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2444 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, do_swap_page() argument
2467 print_bad_pte(vma, address, orig_pte, NULL); do_swap_page()
2476 GFP_HIGHUSER_MOVABLE, vma, address); do_swap_page()
2522 page = ksm_might_need_to_copy(page, vma, address); do_swap_page()
2558 pte = mk_pte(page, vma->vm_page_prot); do_swap_page()
2560 pte = maybe_mkwrite(pte_mkdirty(pte), vma); do_swap_page()
2565 flush_icache_page(vma, page); do_swap_page()
2570 do_page_add_anon_rmap(page, vma, address, exclusive); do_swap_page()
2573 page_add_new_anon_rmap(page, vma, address); do_swap_page()
2575 lru_cache_add_active_or_unevictable(page, vma); do_swap_page()
2579 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); do_swap_page()
2603 update_mmu_cache(vma, address, page_table); do_swap_page()
2625 * doesn't hit another vma.
2627 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) check_stack_guard_page() argument
2630 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { check_stack_guard_page()
2631 struct vm_area_struct *prev = vma->vm_prev; check_stack_guard_page()
2642 return expand_downwards(vma, address - PAGE_SIZE); check_stack_guard_page()
2644 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { check_stack_guard_page()
2645 struct vm_area_struct *next = vma->vm_next; check_stack_guard_page()
2651 return expand_upwards(vma, address + PAGE_SIZE); check_stack_guard_page()
2657 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2661 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, do_anonymous_page() argument
2673 if (vma->vm_flags & VM_SHARED) do_anonymous_page()
2677 if (check_stack_guard_page(vma, address) < 0) do_anonymous_page()
2683 vma->vm_page_prot)); do_anonymous_page()
2691 if (unlikely(anon_vma_prepare(vma))) do_anonymous_page()
2693 page = alloc_zeroed_user_highpage_movable(vma, address); do_anonymous_page()
2706 entry = mk_pte(page, vma->vm_page_prot); do_anonymous_page()
2707 if (vma->vm_flags & VM_WRITE) do_anonymous_page()
2715 page_add_new_anon_rmap(page, vma, address); do_anonymous_page()
2717 lru_cache_add_active_or_unevictable(page, vma); do_anonymous_page()
2722 update_mmu_cache(vma, address, page_table); do_anonymous_page()
2738 * released depending on flags and vma->vm_ops->fault() return value.
2741 static int __do_fault(struct vm_area_struct *vma, unsigned long address, __do_fault() argument
2754 ret = vma->vm_ops->fault(vma, &vmf); __do_fault()
2780 * @vma: virtual memory area
2792 void do_set_pte(struct vm_area_struct *vma, unsigned long address, do_set_pte() argument
2797 flush_icache_page(vma, page); do_set_pte()
2798 entry = mk_pte(page, vma->vm_page_prot); do_set_pte()
2800 entry = maybe_mkwrite(pte_mkdirty(entry), vma); do_set_pte()
2802 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); do_set_pte()
2803 page_add_new_anon_rmap(page, vma, address); do_set_pte()
2805 inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); do_set_pte()
2808 set_pte_at(vma->vm_mm, address, pte, entry); do_set_pte()
2811 update_mmu_cache(vma, address, pte); do_set_pte()
2878 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, do_fault_around() argument
2889 start_addr = max(address & mask, vma->vm_start); do_fault_around()
2895 * max_pgoff is either end of page table or end of vma do_fault_around()
2900 max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, do_fault_around()
2908 if (start_addr >= vma->vm_end) do_fault_around()
2918 vma->vm_ops->map_pages(vma, &vmf); do_fault_around()
2921 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_read_fault() argument
2935 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { do_read_fault()
2937 do_fault_around(vma, address, pte, pgoff, flags); do_read_fault()
2943 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); do_read_fault()
2954 do_set_pte(vma, address, fault_page, pte, false, false); do_read_fault()
2961 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_cow_fault() argument
2971 if (unlikely(anon_vma_prepare(vma))) do_cow_fault()
2974 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); do_cow_fault()
2983 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); do_cow_fault()
2988 copy_user_highpage(new_page, fault_page, address, vma); do_cow_fault()
3002 i_mmap_unlock_read(vma->vm_file->f_mapping); do_cow_fault()
3006 do_set_pte(vma, address, new_page, pte, true, true); do_cow_fault()
3008 lru_cache_add_active_or_unevictable(new_page, vma); do_cow_fault()
3018 i_mmap_unlock_read(vma->vm_file->f_mapping); do_cow_fault()
3027 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_shared_fault() argument
3038 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); do_shared_fault()
3046 if (vma->vm_ops->page_mkwrite) { do_shared_fault()
3048 tmp = do_page_mkwrite(vma, fault_page, address); do_shared_fault()
3063 do_set_pte(vma, address, fault_page, pte, true, false); do_shared_fault()
3071 * pinned by vma->vm_file's reference. We rely on unlock_page()'s do_shared_fault()
3076 if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { do_shared_fault()
3084 if (!vma->vm_ops->page_mkwrite) do_shared_fault()
3085 file_update_time(vma->vm_file); do_shared_fault()
3091 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3096 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, do_fault() argument
3101 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; do_fault()
3105 if (!vma->vm_ops->fault) do_fault()
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3110 if (!(vma->vm_flags & VM_SHARED)) do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); do_fault()
3116 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, numa_migrate_prep() argument
3128 return mpol_misplaced(page, vma, addr); numa_migrate_prep()
3131 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_numa_page() argument
3144 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); do_numa_page()
3163 pte = pte_modify(pte, vma->vm_page_prot); do_numa_page()
3168 update_mmu_cache(vma, addr, ptep); do_numa_page()
3170 page = vm_normal_page(vma, addr, pte); do_numa_page()
3184 if (!(vma->vm_flags & VM_WRITE)) do_numa_page()
3191 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) do_numa_page()
3196 target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); do_numa_page()
3204 migrated = migrate_misplaced_page(page, vma, target_nid); do_numa_page()
3226 * We enter with non-exclusive mmap_sem (to exclude vma changes,
3234 struct vm_area_struct *vma, unsigned long address, handle_pte_fault()
3252 if (vma->vm_ops) handle_pte_fault()
3253 return do_fault(mm, vma, address, pte, pmd, handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, handle_pte_fault()
3259 return do_swap_page(mm, vma, address, handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); handle_pte_fault()
3272 return do_wp_page(mm, vma, address, handle_pte_fault()
3277 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { handle_pte_fault()
3278 update_mmu_cache(vma, address, pte); handle_pte_fault()
3287 flush_tlb_fix_spurious_fault(vma, address); handle_pte_fault()
3300 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, __handle_mm_fault() argument
3308 if (unlikely(is_vm_hugetlb_page(vma))) __handle_mm_fault()
3309 return hugetlb_fault(mm, vma, address, flags); __handle_mm_fault()
3318 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { __handle_mm_fault()
3320 if (!vma->vm_ops) __handle_mm_fault()
3321 ret = do_huge_pmd_anonymous_page(mm, vma, address, __handle_mm_fault()
3342 return do_huge_pmd_numa_page(mm, vma, address, __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); __handle_mm_fault()
3396 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, handle_mm_fault() argument
3416 ret = __handle_mm_fault(mm, vma, address, flags); handle_mm_fault()
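handle_mm_fault() is the entry point that arch fault handlers and get_user_pages() reach once the vma is known; in 4.1 it takes the mm, vma, faulting address and FAULT_FLAG_* bits. A minimal sketch of the caller pattern (mydrv_fault_in_page is hypothetical; compare fixup_user_fault() in gup.c below):

#include <linux/mm.h>

/* Fault in one writable page at 'address'. Assumes the caller holds
 * mm->mmap_sem for read and that 'vma' covers 'address'. */
static int mydrv_fault_in_page(struct mm_struct *mm,
                               struct vm_area_struct *vma,
                               unsigned long address)
{
        int ret = handle_mm_fault(mm, vma, address, FAULT_FLAG_WRITE);

        if (ret & VM_FAULT_ERROR) {
                if (ret & VM_FAULT_OOM)
                        return -ENOMEM;
                return -EFAULT;         /* SIGBUS/SIGSEGV-class failure */
        }
        return 0;
}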
3540 * @vma: memory mapping
3548 int follow_pfn(struct vm_area_struct *vma, unsigned long address, follow_pfn() argument
3555 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_pfn()
3558 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); follow_pfn()
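follow_pfn() lets code translate a user virtual address inside a VM_IO/VM_PFNMAP mapping into a pfn; media drivers that accept user pointers to frame buffers are typical callers. A hedged sketch (mydrv_resolve_pfn is hypothetical; mmap_sem must be held across the lookup):

#include <linux/mm.h>
#include <linux/sched.h>

static int mydrv_resolve_pfn(struct mm_struct *mm, unsigned long uaddr,
                             unsigned long *pfn)
{
        struct vm_area_struct *vma;
        int err = -EINVAL;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, uaddr);
        if (vma && vma->vm_start <= uaddr)
                err = follow_pfn(vma, uaddr, pfn); /* checks VM_IO|VM_PFNMAP */
        up_read(&mm->mmap_sem);
        return err;
}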
3568 int follow_phys(struct vm_area_struct *vma, follow_phys() argument
3576 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) follow_phys()
3579 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) follow_phys()
3596 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, generic_access_phys() argument
3604 if (follow_phys(vma, addr, write, &prot, &phys_addr)) generic_access_phys()
3626 struct vm_area_struct *vma; __access_remote_vm() local
3637 write, 1, &page, &vma); __access_remote_vm()
3646 vma = find_vma(mm, addr); __access_remote_vm()
3647 if (!vma || vma->vm_start > addr) __access_remote_vm()
3649 if (vma->vm_ops && vma->vm_ops->access) __access_remote_vm()
3650 ret = vma->vm_ops->access(vma, addr, buf, __access_remote_vm()
3664 copy_to_user_page(vma, page, addr, __access_remote_vm()
3668 copy_from_user_page(vma, page, addr, __access_remote_vm()
3726 struct vm_area_struct *vma; print_vma_addr() local
3736 vma = find_vma(mm, ip); print_vma_addr()
3737 if (vma && vma->vm_file) { print_vma_addr()
3738 struct file *f = vma->vm_file; print_vma_addr()
3747 vma->vm_start, print_vma_addr()
3748 vma->vm_end - vma->vm_start); print_vma_addr()
3817 struct vm_area_struct *vma, copy_user_gigantic_page()
3826 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); copy_user_gigantic_page()
3835 unsigned long addr, struct vm_area_struct *vma, copy_user_huge_page()
3841 copy_user_gigantic_page(dst, src, addr, vma, copy_user_huge_page()
3849 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); copy_user_huge_page()
806 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) copy_one_pte() argument
887 copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pte_range() argument
950 copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pmd_range() argument
983 copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) copy_pud_range() argument
1005 copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) copy_page_range() argument
1072 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) zap_pte_range() argument
1184 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) zap_pmd_range() argument
1228 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) zap_pud_range() argument
1247 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) unmap_page_range() argument
1271 unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) unmap_single_vma() argument
1332 unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) unmap_vmas() argument
2187 wp_pfn_shared(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, pmd_t *pmd) wp_pfn_shared() argument
3233 handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) handle_pte_fault() argument
3815 copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) copy_user_gigantic_page() argument
3834 copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) copy_user_huge_page() argument
H A Ddebug.c154 void dump_vma(const struct vm_area_struct *vma) dump_vma() argument
156 pr_emerg("vma %p start %p end %p\n" dump_vma()
160 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, dump_vma()
161 vma->vm_prev, vma->vm_mm, dump_vma()
162 (unsigned long)pgprot_val(vma->vm_page_prot), dump_vma()
163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, dump_vma()
164 vma->vm_file, vma->vm_private_data); dump_vma()
165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); dump_vma()
H A Dmlock.c48 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
50 * the mmap_sem for read, and verify that the vma really is locked
145 * the page back to the unevictable list if some other vma has it mlocked.
156 * munlock_vma_page - munlock a vma page
163 * When we munlock a page, because the vma where we found the page is being
360 struct vm_area_struct *vma, int zoneid, unsigned long start, __munlock_pagevec_fill()
371 pte = get_locked_pte(vma->vm_mm, start, &ptl); __munlock_pagevec_fill()
383 page = vm_normal_page(vma, start, *pte); __munlock_pagevec_fill()
405 * munlock_vma_pages_range() - munlock all pages in the vma range.
406 * @vma - vma containing range to be munlock()ed.
407 * @start - start address in @vma of the range
408 * @end - end of range in @vma.
412 * Called with @vma VM_LOCKED.
422 void munlock_vma_pages_range(struct vm_area_struct *vma, munlock_vma_pages_range() argument
425 vma->vm_flags &= ~VM_LOCKED; munlock_vma_pages_range()
443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, munlock_vma_pages_range()
474 start = __munlock_pagevec_fill(&pvec, vma, munlock_vma_pages_range()
498 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, mlock_fixup() argument
501 struct mm_struct *mm = vma->vm_mm; mlock_fixup()
507 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || mlock_fixup()
508 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) mlock_fixup()
511 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); mlock_fixup()
512 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, mlock_fixup()
513 vma->vm_file, pgoff, vma_policy(vma)); mlock_fixup()
515 vma = *prev; mlock_fixup()
519 if (start != vma->vm_start) { mlock_fixup()
520 ret = split_vma(mm, vma, start, 1); mlock_fixup()
525 if (end != vma->vm_end) { mlock_fixup()
526 ret = split_vma(mm, vma, end, 0); mlock_fixup()
547 vma->vm_flags = newflags; mlock_fixup()
549 munlock_vma_pages_range(vma, start, end); mlock_fixup()
552 *prev = vma; mlock_fixup()
559 struct vm_area_struct * vma, * prev; do_mlock() local
569 vma = find_vma(current->mm, start); do_mlock()
570 if (!vma || vma->vm_start > start) do_mlock()
573 prev = vma->vm_prev; do_mlock()
574 if (start > vma->vm_start) do_mlock()
575 prev = vma; do_mlock()
580 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ do_mlock()
582 newflags = vma->vm_flags & ~VM_LOCKED; do_mlock()
586 tmp = vma->vm_end; do_mlock()
589 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); do_mlock()
598 vma = prev->vm_next; do_mlock()
599 if (!vma || vma->vm_start != nstart) { do_mlock()
659 struct vm_area_struct * vma, * prev = NULL; do_mlockall() local
668 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { do_mlockall()
671 newflags = vma->vm_flags & ~VM_LOCKED; do_mlockall()
676 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); do_mlockall()
359 __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) __munlock_pagevec_fill() argument
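do_mlock() and mlock_fixup() above are the kernel side of the mlock(2) family: the covered vmas are split or merged as needed, VM_LOCKED is set or cleared, and the pages are faulted in or munlocked. A short userspace illustration of the syscalls these paths serve:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096 * 16;
        void *buf = malloc(len);

        if (!buf)
                return 1;
        /* Kernel: do_mlock() walks the vmas, mlock_fixup() sets VM_LOCKED,
         * then the range is populated. */
        if (mlock(buf, len) != 0)
                perror("mlock");
        /* Kernel: VM_LOCKED is cleared via munlock_vma_pages_range(). */
        if (munlock(buf, len) != 0)
                perror("munlock");
        free(buf);
        return 0;
}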
H A Dhuge_memory.c63 * it would have happened if the vma was large enough during page
702 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) maybe_pmd_mkwrite() argument
704 if (likely(vma->vm_flags & VM_WRITE)) maybe_pmd_mkwrite()
718 struct vm_area_struct *vma, __do_huge_pmd_anonymous_page()
753 entry = mk_huge_pmd(page, vma->vm_page_prot); __do_huge_pmd_anonymous_page()
754 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); __do_huge_pmd_anonymous_page()
755 page_add_new_anon_rmap(page, vma, haddr); __do_huge_pmd_anonymous_page()
757 lru_cache_add_active_or_unevictable(page, vma); __do_huge_pmd_anonymous_page()
775 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, set_huge_zero_page()
781 entry = mk_pmd(zero_page, vma->vm_page_prot); set_huge_zero_page()
789 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_anonymous_page() argument
797 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) do_huge_pmd_anonymous_page()
799 if (unlikely(anon_vma_prepare(vma))) do_huge_pmd_anonymous_page()
801 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) do_huge_pmd_anonymous_page()
819 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, do_huge_pmd_anonymous_page()
828 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); do_huge_pmd_anonymous_page()
829 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); do_huge_pmd_anonymous_page()
834 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { do_huge_pmd_anonymous_page()
846 struct vm_area_struct *vma) copy_huge_pmd()
883 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, copy_huge_pmd()
896 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ copy_huge_pmd()
920 struct vm_area_struct *vma, huge_pmd_set_accessed()
935 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) huge_pmd_set_accessed()
936 update_mmu_cache_pmd(vma, address, pmd); huge_pmd_set_accessed()
974 struct vm_area_struct *vma, do_huge_pmd_wp_page_fallback()
999 vma, address, page_to_nid(page)); do_huge_pmd_wp_page_fallback()
1020 haddr + PAGE_SIZE * i, vma); do_huge_pmd_wp_page_fallback()
1034 pmdp_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page_fallback()
1042 entry = mk_pte(pages[i], vma->vm_page_prot); do_huge_pmd_wp_page_fallback()
1043 entry = maybe_mkwrite(pte_mkdirty(entry), vma); do_huge_pmd_wp_page_fallback()
1046 page_add_new_anon_rmap(pages[i], vma, haddr); do_huge_pmd_wp_page_fallback()
1048 lru_cache_add_active_or_unevictable(pages[i], vma); do_huge_pmd_wp_page_fallback()
1082 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_wp_page() argument
1095 VM_BUG_ON_VMA(!vma->anon_vma, vma); do_huge_pmd_wp_page()
1108 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); do_huge_pmd_wp_page()
1109 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) do_huge_pmd_wp_page()
1110 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1117 if (transparent_hugepage_enabled(vma) && do_huge_pmd_wp_page()
1119 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); do_huge_pmd_wp_page()
1120 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); do_huge_pmd_wp_page()
1126 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1129 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, do_huge_pmd_wp_page()
1147 split_huge_page_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1158 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); do_huge_pmd_wp_page()
1175 entry = mk_huge_pmd(new_page, vma->vm_page_prot); do_huge_pmd_wp_page()
1176 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); do_huge_pmd_wp_page()
1177 pmdp_clear_flush_notify(vma, haddr, pmd); do_huge_pmd_wp_page()
1178 page_add_new_anon_rmap(new_page, vma, haddr); do_huge_pmd_wp_page()
1180 lru_cache_add_active_or_unevictable(new_page, vma); do_huge_pmd_wp_page()
1182 update_mmu_cache_pmd(vma, address, pmd); do_huge_pmd_wp_page()
1203 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, follow_trans_huge_pmd() argument
1208 struct mm_struct *mm = vma->vm_mm; follow_trans_huge_pmd()
1237 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, follow_trans_huge_pmd()
1239 update_mmu_cache_pmd(vma, addr, pmd); follow_trans_huge_pmd()
1241 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { follow_trans_huge_pmd()
1259 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, do_huge_pmd_numa_page() argument
1274 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); do_huge_pmd_numa_page()
1303 if (!(vma->vm_flags & VM_WRITE)) do_huge_pmd_numa_page()
1311 target_nid = mpol_misplaced(page, vma, haddr); do_huge_pmd_numa_page()
1355 migrated = migrate_misplaced_transhuge_page(mm, vma, do_huge_pmd_numa_page()
1367 pmd = pmd_modify(pmd, vma->vm_page_prot); do_huge_pmd_numa_page()
1372 update_mmu_cache_pmd(vma, addr, pmdp); do_huge_pmd_numa_page()
1387 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, zap_huge_pmd() argument
1393 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { zap_huge_pmd()
1427 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, move_huge_pmd() argument
1436 struct mm_struct *mm = vma->vm_mm; move_huge_pmd()
1457 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); move_huge_pmd()
1485 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, change_huge_pmd() argument
1488 struct mm_struct *mm = vma->vm_mm; change_huge_pmd()
1492 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { change_huge_pmd()
1529 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, __pmd_trans_huge_lock() argument
1532 *ptl = pmd_lock(vma->vm_mm, pmd); __pmd_trans_huge_lock()
1536 wait_split_huge_page(vma->anon_vma, pmd); __pmd_trans_huge_lock()
1603 struct vm_area_struct *vma, __split_huge_page_splitting()
1606 struct mm_struct *mm = vma->vm_mm; __split_huge_page_splitting()
1625 pmdp_splitting_flush(vma, address, pmd); __split_huge_page_splitting()
1751 struct vm_area_struct *vma, __split_huge_page_map()
1754 struct mm_struct *mm = vma->vm_mm; __split_huge_page_map()
1778 entry = mk_pte(page + i, vma->vm_page_prot); __split_huge_page_map()
1779 entry = maybe_mkwrite(pte_mkdirty(entry), vma); __split_huge_page_map()
1817 pmdp_invalidate(vma, address, pmd); __split_huge_page_map()
1840 struct vm_area_struct *vma = avc->vma; __split_huge_page() local
1841 unsigned long addr = vma_address(page, vma); __split_huge_page()
1842 BUG_ON(is_vma_temporary_stack(vma)); __split_huge_page()
1843 mapcount += __split_huge_page_splitting(page, vma, addr); __split_huge_page()
1865 struct vm_area_struct *vma = avc->vma; __split_huge_page() local
1866 unsigned long addr = vma_address(page, vma); __split_huge_page()
1867 BUG_ON(is_vma_temporary_stack(vma)); __split_huge_page()
1868 mapcount2 += __split_huge_page_map(page, vma, addr); __split_huge_page()
1922 int hugepage_madvise(struct vm_area_struct *vma, hugepage_madvise() argument
1933 if (mm_has_pgste(vma->vm_mm)) hugepage_madvise()
1944 * If the vma becomes good for khugepaged to scan, hugepage_madvise()
1948 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) hugepage_madvise()
1961 * this vma even if we leave the mm registered in khugepaged if hugepage_madvise()
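hugepage_madvise() is reached from madvise(2); marking a range MADV_HUGEPAGE sets VM_HUGEPAGE on the vma and registers the mm with khugepaged so the range can later be collapsed into huge pages. A userspace illustration:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 8UL << 20;         /* 8 MiB, above HPAGE_PMD_SIZE */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        /* Kernel: hugepage_madvise() flips the vma flags and calls
         * khugepaged_enter_vma_merge() as shown above. */
        if (madvise(p, len, MADV_HUGEPAGE) != 0)
                perror("madvise(MADV_HUGEPAGE)");
        munmap(p, len);
        return 0;
}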
2054 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, khugepaged_enter_vma_merge() argument
2058 if (!vma->anon_vma) khugepaged_enter_vma_merge()
2064 if (vma->vm_ops || (vm_flags & VM_NO_THP)) khugepaged_enter_vma_merge()
2067 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; khugepaged_enter_vma_merge()
2068 hend = vma->vm_end & HPAGE_PMD_MASK; khugepaged_enter_vma_merge()
2070 return khugepaged_enter(vma, vm_flags); khugepaged_enter_vma_merge()
2123 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, __collapse_huge_page_isolate() argument
2143 page = vm_normal_page(vma, address, pteval); __collapse_huge_page_isolate()
2197 mmu_notifier_test_young(vma->vm_mm, address)) __collapse_huge_page_isolate()
2208 struct vm_area_struct *vma, __collapse_huge_page_copy()
2219 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); __collapse_huge_page_copy()
2229 pte_clear(vma->vm_mm, address, _pte); __collapse_huge_page_copy()
2234 copy_user_highpage(page, src_page, address, vma); __collapse_huge_page_copy()
2247 pte_clear(vma->vm_mm, address, _pte); __collapse_huge_page_copy()
2335 struct vm_area_struct *vma, unsigned long address, khugepaged_alloc_page()
2344 * that. We will recheck the vma after taking it again in write mode. khugepaged_alloc_page()
2403 struct vm_area_struct *vma, unsigned long address, khugepaged_alloc_page()
2413 static bool hugepage_vma_check(struct vm_area_struct *vma) hugepage_vma_check() argument
2415 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || hugepage_vma_check()
2416 (vma->vm_flags & VM_NOHUGEPAGE)) hugepage_vma_check()
2419 if (!vma->anon_vma || vma->vm_ops) hugepage_vma_check()
2421 if (is_vma_temporary_stack(vma)) hugepage_vma_check()
2423 return !(vma->vm_flags & VM_NO_THP); hugepage_vma_check()
2429 struct vm_area_struct *vma, collapse_huge_page()
2451 new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node); collapse_huge_page()
2468 vma = find_vma(mm, address); collapse_huge_page()
2469 if (!vma) collapse_huge_page()
2471 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; collapse_huge_page()
2472 hend = vma->vm_end & HPAGE_PMD_MASK; collapse_huge_page()
2475 if (!hugepage_vma_check(vma)) collapse_huge_page()
2481 anon_vma_lock_write(vma->anon_vma); collapse_huge_page()
2496 _pmd = pmdp_clear_flush(vma, address, pmd); collapse_huge_page()
2501 isolated = __collapse_huge_page_isolate(vma, address, pte); collapse_huge_page()
2515 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2523 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2525 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); collapse_huge_page()
2530 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); collapse_huge_page()
2531 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); collapse_huge_page()
2542 page_add_new_anon_rmap(new_page, vma, address); collapse_huge_page()
2544 lru_cache_add_active_or_unevictable(new_page, vma); collapse_huge_page()
2547 update_mmu_cache_pmd(vma, address, pmd); collapse_huge_page()
2563 struct vm_area_struct *vma, khugepaged_scan_pmd()
2598 page = vm_normal_page(vma, _address, pteval); khugepaged_scan_pmd()
2622 mmu_notifier_test_young(vma->vm_mm, address)) khugepaged_scan_pmd()
2632 collapse_huge_page(mm, address, hpage, vma, node); khugepaged_scan_pmd()
2668 struct vm_area_struct *vma; variable in typeref:struct:vm_area_struct
2687 vma = NULL;
2689 vma = find_vma(mm, khugepaged_scan.address);
2692 for (; vma; vma = vma->vm_next) {
2700 if (!hugepage_vma_check(vma)) {
2705 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2706 hend = vma->vm_end & HPAGE_PMD_MASK;
2724 ret = khugepaged_scan_pmd(mm, vma,
2747 if (khugepaged_test_exit(mm) || !vma) {
2854 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, __split_huge_zero_page_pmd() argument
2857 struct mm_struct *mm = vma->vm_mm; __split_huge_zero_page_pmd()
2862 pmdp_clear_flush_notify(vma, haddr, pmd); __split_huge_zero_page_pmd()
2870 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); __split_huge_zero_page_pmd()
2882 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, __split_huge_page_pmd() argument
2887 struct mm_struct *mm = vma->vm_mm; __split_huge_page_pmd()
2892 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); __split_huge_page_pmd()
2905 __split_huge_zero_page_pmd(vma, haddr, pmd); __split_huge_page_pmd()
2932 struct vm_area_struct *vma; split_huge_page_pmd_mm() local
2934 vma = find_vma(mm, address); split_huge_page_pmd_mm()
2935 BUG_ON(vma == NULL); split_huge_page_pmd_mm()
2936 split_huge_page_pmd(vma, address, pmd); split_huge_page_pmd_mm()
2966 void __vma_adjust_trans_huge(struct vm_area_struct *vma, __vma_adjust_trans_huge() argument
2977 (start & HPAGE_PMD_MASK) >= vma->vm_start && __vma_adjust_trans_huge()
2978 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) __vma_adjust_trans_huge()
2979 split_huge_page_address(vma->vm_mm, start); __vma_adjust_trans_huge()
2987 (end & HPAGE_PMD_MASK) >= vma->vm_start && __vma_adjust_trans_huge()
2988 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) __vma_adjust_trans_huge()
2989 split_huge_page_address(vma->vm_mm, end); __vma_adjust_trans_huge()
2992 * If we're also updating the vma->vm_next->vm_start, if the new __vma_adjust_trans_huge()
2997 struct vm_area_struct *next = vma->vm_next; __vma_adjust_trans_huge()
717 __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page, gfp_t gfp) __do_huge_pmd_anonymous_page() argument
774 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) set_huge_zero_page() argument
844 copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) copy_huge_pmd() argument
919 huge_pmd_set_accessed(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, int dirty) huge_pmd_set_accessed() argument
973 do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) do_huge_pmd_wp_page_fallback() argument
1602 __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address) __split_huge_page_splitting() argument
1750 __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address) __split_huge_page_map() argument
2207 __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl) __collapse_huge_page_copy() argument
2334 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node) khugepaged_alloc_page() argument
2402 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node) khugepaged_alloc_page() argument
2426 collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) collapse_huge_page() argument
2562 khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) khugepaged_scan_pmd() argument
H A Dgup.c19 static struct page *no_page_table(struct vm_area_struct *vma, no_page_table() argument
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) no_page_table()
35 static struct page *follow_page_pte(struct vm_area_struct *vma, follow_page_pte() argument
38 struct mm_struct *mm = vma->vm_mm; follow_page_pte()
45 return no_page_table(vma, flags); follow_page_pte()
74 page = vm_normal_page(vma, address, pte); follow_page_pte()
95 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { follow_page_pte()
127 return no_page_table(vma, flags); follow_page_pte()
132 * @vma: vm_area_struct mapping @address
143 struct page *follow_page_mask(struct vm_area_struct *vma, follow_page_mask() argument
152 struct mm_struct *mm = vma->vm_mm; follow_page_mask()
164 return no_page_table(vma, flags); follow_page_mask()
168 return no_page_table(vma, flags); follow_page_mask()
169 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { follow_page_mask()
173 return no_page_table(vma, flags); follow_page_mask()
176 return no_page_table(vma, flags); follow_page_mask()
180 return no_page_table(vma, flags); follow_page_mask()
181 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { follow_page_mask()
185 return no_page_table(vma, flags); follow_page_mask()
188 return no_page_table(vma, flags); follow_page_mask()
191 split_huge_page_pmd(vma, address, pmd); follow_page_mask()
192 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
198 wait_split_huge_page(vma->anon_vma, pmd); follow_page_mask()
200 page = follow_trans_huge_pmd(vma, address, follow_page_mask()
209 return follow_page_pte(vma, address, pmd, flags); follow_page_mask()
213 unsigned int gup_flags, struct vm_area_struct **vma, get_gate_page()
239 *vma = get_gate_vma(mm); get_gate_page()
242 *page = vm_normal_page(*vma, address, *pte); get_gate_page()
261 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, faultin_page() argument
264 struct mm_struct *mm = vma->vm_mm; faultin_page()
270 (stack_guard_page_start(vma, address) || faultin_page()
271 stack_guard_page_end(vma, address + PAGE_SIZE))) faultin_page()
284 ret = handle_mm_fault(mm, vma, address, fault_flags); faultin_page()
317 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) faultin_page()
322 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) check_vma_flags() argument
324 vm_flags_t vm_flags = vma->vm_flags; check_vma_flags()
335 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could check_vma_flags()
351 * Is there actually any vma we can reach here which does not check_vma_flags()
423 struct vm_area_struct *vma = NULL; __get_user_pages() local
443 /* first iteration or cross vma bound */ __get_user_pages()
444 if (!vma || start >= vma->vm_end) { __get_user_pages()
445 vma = find_extend_vma(mm, start); __get_user_pages()
446 if (!vma && in_gate_area(mm, start)) { __get_user_pages()
449 gup_flags, &vma, __get_user_pages()
457 if (!vma || check_vma_flags(vma, gup_flags)) __get_user_pages()
459 if (is_vm_hugetlb_page(vma)) { __get_user_pages()
460 i = follow_hugetlb_page(mm, vma, pages, vmas, __get_user_pages()
474 page = follow_page_mask(vma, start, foll_flags, &page_mask); __get_user_pages()
477 ret = faultin_page(tsk, vma, start, &foll_flags, __get_user_pages()
497 flush_anon_page(vma, page, start); __get_user_pages()
503 vmas[i] = vma; __get_user_pages()
547 struct vm_area_struct *vma; fixup_user_fault() local
551 vma = find_extend_vma(mm, address); fixup_user_fault()
552 if (!vma || address < vma->vm_start) fixup_user_fault()
556 if (!(vm_flags & vma->vm_flags)) fixup_user_fault()
559 ret = handle_mm_fault(mm, vma, address, fault_flags); fixup_user_fault()
822 * populate_vma_page_range() - populate a range of pages in the vma.
823 * @vma: target vma
832 * vma->vm_mm->mmap_sem must be held.
840 long populate_vma_page_range(struct vm_area_struct *vma, populate_vma_page_range() argument
843 struct mm_struct *mm = vma->vm_mm; populate_vma_page_range()
849 VM_BUG_ON_VMA(start < vma->vm_start, vma); populate_vma_page_range()
850 VM_BUG_ON_VMA(end > vma->vm_end, vma); populate_vma_page_range()
859 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) populate_vma_page_range()
866 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) populate_vma_page_range()
888 struct vm_area_struct *vma = NULL; __mm_populate() local
904 vma = find_vma(mm, nstart); __mm_populate()
905 } else if (nstart >= vma->vm_end) __mm_populate()
906 vma = vma->vm_next; __mm_populate()
907 if (!vma || vma->vm_start >= end) __mm_populate()
913 nend = min(end, vma->vm_end); __mm_populate()
914 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) __mm_populate()
916 if (nstart < vma->vm_start) __mm_populate()
917 nstart = vma->vm_start; __mm_populate()
920 * double checks the vma flags, so that it won't mlock pages __mm_populate()
921 * if the vma was already munlocked. __mm_populate()
923 ret = populate_vma_page_range(vma, nstart, nend, &locked); __mm_populate()
956 struct vm_area_struct *vma; get_dump_page() local
960 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, get_dump_page()
963 flush_cache_page(vma, addr, page_to_pfn(page)); get_dump_page()
212 get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) get_gate_page() argument
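The public face of this machinery is get_user_pages()/get_user_pages_fast(). A hedged sketch of pinning a user buffer for DMA using the get_user_pages_fast() form (start/len would come from a hypothetical ioctl argument; mydrv_pin_user_buffer is not an in-tree function):

#include <linux/mm.h>
#include <linux/slab.h>

static int mydrv_pin_user_buffer(unsigned long start, unsigned long len,
                                 int write, struct page ***pagesp)
{
        unsigned long first = start >> PAGE_SHIFT;
        unsigned long last = (start + len - 1) >> PAGE_SHIFT;
        int nr_pages = last - first + 1;
        struct page **pages;
        int pinned;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = get_user_pages_fast(start, nr_pages, write, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }
        if (pinned < nr_pages) {
                /* Partial pin: drop what we got and fail, for simplicity. */
                while (pinned--)
                        put_page(pages[pinned]);
                kfree(pages);
                return -EFAULT;
        }
        *pagesp = pages;
        return nr_pages;
}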
H A Dmincore.c85 struct vm_area_struct *vma, unsigned char *vec) __mincore_unmapped_range()
90 if (vma->vm_file) { __mincore_unmapped_range()
93 pgoff = linear_page_index(vma, addr); __mincore_unmapped_range()
95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); __mincore_unmapped_range()
107 walk->vma, walk->private); mincore_unmapped_range()
115 struct vm_area_struct *vma = walk->vma; mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { mincore_pte_range()
127 __mincore_unmapped_range(addr, end, vma, vec); mincore_pte_range()
137 vma, vec); mincore_pte_range()
175 struct vm_area_struct *vma; do_mincore() local
185 vma = find_vma(current->mm, addr); do_mincore()
186 if (!vma || addr < vma->vm_start) do_mincore()
188 mincore_walk.mm = vma->vm_mm; do_mincore()
189 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); do_mincore()
84 __mincore_unmapped_range(unsigned long addr, unsigned long end, struct vm_area_struct *vma, unsigned char *vec) __mincore_unmapped_range() argument
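do_mincore() implements mincore(2): one byte of the result vector per page, with bit 0 set when the page is resident. A userspace illustration:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t len = 4 * page;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned char vec[4];
        int i;

        if (p == MAP_FAILED)
                return 1;
        ((char *)p)[0] = 1;             /* fault in the first page only */
        if (mincore(p, len, vec) == 0) {
                for (i = 0; i < 4; i++)
                        printf("page %d: %s\n", i,
                               (vec[i] & 1) ? "resident" : "not resident");
        }
        munmap(p, len);
        return 0;
}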
H A Dmempolicy.c438 * Rebind each vma in mm to new nodemask.
445 struct vm_area_struct *vma; mpol_rebind_mm() local
448 for (vma = mm->mmap; vma; vma = vma->vm_next) mpol_rebind_mm()
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); mpol_rebind_mm()
488 struct vm_area_struct *vma = walk->vma; queue_pages_pte_range() local
496 split_huge_page_pmd(vma, addr, pmd); queue_pages_pte_range()
504 page = vm_normal_page(vma, addr, *pte); queue_pages_pte_range()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); queue_pages_hugetlb()
567 unsigned long change_prot_numa(struct vm_area_struct *vma, change_prot_numa() argument
572 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); change_prot_numa()
579 static unsigned long change_prot_numa(struct vm_area_struct *vma, change_prot_numa() argument
589 struct vm_area_struct *vma = walk->vma; queue_pages_test_walk() local
591 unsigned long endvma = vma->vm_end; queue_pages_test_walk()
594 if (vma->vm_flags & VM_PFNMAP) queue_pages_test_walk()
599 if (vma->vm_start > start) queue_pages_test_walk()
600 start = vma->vm_start; queue_pages_test_walk()
603 if (!vma->vm_next && vma->vm_end < end) queue_pages_test_walk()
605 if (qp->prev && qp->prev->vm_end < vma->vm_start) queue_pages_test_walk()
609 qp->prev = vma; queue_pages_test_walk()
611 if (vma->vm_flags & VM_PFNMAP) queue_pages_test_walk()
616 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) queue_pages_test_walk()
617 change_prot_numa(vma, start, endvma); queue_pages_test_walk()
623 vma_migratable(vma))) queue_pages_test_walk()
624 /* queue pages from current vma */ queue_pages_test_walk()
662 static int vma_replace_policy(struct vm_area_struct *vma, vma_replace_policy() argument
669 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma_replace_policy()
670 vma->vm_start, vma->vm_end, vma->vm_pgoff, vma_replace_policy()
671 vma->vm_ops, vma->vm_file, vma_replace_policy()
672 vma->vm_ops ? vma->vm_ops->set_policy : NULL); vma_replace_policy()
678 if (vma->vm_ops && vma->vm_ops->set_policy) { vma_replace_policy()
679 err = vma->vm_ops->set_policy(vma, new); vma_replace_policy()
684 old = vma->vm_policy; vma_replace_policy()
685 vma->vm_policy = new; /* protected by mmap_sem */ vma_replace_policy()
700 struct vm_area_struct *vma; mbind_range() local
706 vma = find_vma(mm, start); mbind_range()
707 if (!vma || vma->vm_start > start) mbind_range()
710 prev = vma->vm_prev; mbind_range()
711 if (start > vma->vm_start) mbind_range()
712 prev = vma; mbind_range()
714 for (; vma && vma->vm_start < end; prev = vma, vma = next) { mbind_range()
715 next = vma->vm_next; mbind_range()
716 vmstart = max(start, vma->vm_start); mbind_range()
717 vmend = min(end, vma->vm_end); mbind_range()
719 if (mpol_equal(vma_policy(vma), new_pol)) mbind_range()
722 pgoff = vma->vm_pgoff + mbind_range()
723 ((vmstart - vma->vm_start) >> PAGE_SHIFT); mbind_range()
724 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, mbind_range()
725 vma->anon_vma, vma->vm_file, pgoff, mbind_range()
728 vma = prev; mbind_range()
729 next = vma->vm_next; mbind_range()
730 if (mpol_equal(vma_policy(vma), new_pol)) mbind_range()
732 /* vma_merge() joined vma && vma->next, case 8 */ mbind_range()
735 if (vma->vm_start != vmstart) { mbind_range()
736 err = split_vma(vma->vm_mm, vma, vmstart, 1); mbind_range()
740 if (vma->vm_end != vmend) { mbind_range()
741 err = split_vma(vma->vm_mm, vma, vmend, 0); mbind_range()
746 err = vma_replace_policy(vma, new_pol); mbind_range()
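mbind_range() is the worker behind mbind(2): it walks the vmas in [start, end), merging or splitting them so each carries the requested policy, then installs it via vma_replace_policy(). A hedged userspace illustration, assuming a NUMA kernel and the <numaif.h> header shipped with numactl/libnuma:

#include <stdio.h>
#include <numaif.h>             /* mbind(), MPOL_BIND (libnuma headers) */
#include <sys/mman.h>

int main(void)
{
        size_t len = 1UL << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long nodemask = 1UL;   /* allow allocations from node 0 only */

        if (p == MAP_FAILED)
                return 1;
        /* Kernel: mbind_range() splits/merges the covered vmas and sets
         * the MPOL_BIND policy on each. */
        if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0) != 0)
                perror("mbind");
        munmap(p, len);
        return 0;
}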
838 struct vm_area_struct *vma = NULL; do_get_mempolicy() local
858 * vma/shared policy at addr is NULL. We do_get_mempolicy()
862 vma = find_vma_intersection(mm, addr, addr+1); do_get_mempolicy()
863 if (!vma) { do_get_mempolicy()
867 if (vma->vm_ops && vma->vm_ops->get_policy) do_get_mempolicy()
868 pol = vma->vm_ops->get_policy(vma, addr); do_get_mempolicy()
870 pol = vma->vm_policy; do_get_mempolicy()
900 if (vma) { do_get_mempolicy()
902 vma = NULL; do_get_mempolicy()
918 if (vma) do_get_mempolicy()
1091 * Allocate a new page for page migration based on vma policy.
1092 * Start by assuming the page is mapped by the same vma as contains @start.
1099 struct vm_area_struct *vma; new_page() local
1102 vma = find_vma(current->mm, start); new_page()
1103 while (vma) { new_page()
1104 address = page_address_in_vma(page, vma); new_page()
1107 vma = vma->vm_next; new_page()
1111 BUG_ON(!vma); new_page()
1112 return alloc_huge_page_noerr(vma, address, 1); new_page()
1115 * if !vma, alloc_page_vma() will use task or system default policy new_page()
1117 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); new_page()
1544 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, __get_vma_policy() argument
1549 if (vma) { __get_vma_policy()
1550 if (vma->vm_ops && vma->vm_ops->get_policy) { __get_vma_policy()
1551 pol = vma->vm_ops->get_policy(vma, addr); __get_vma_policy()
1552 } else if (vma->vm_policy) { __get_vma_policy()
1553 pol = vma->vm_policy; __get_vma_policy()
1557 * a pseudo vma whose vma->vm_ops=NULL. Take a reference __get_vma_policy()
1570 * get_vma_policy(@vma, @addr)
1571 * @vma: virtual memory area whose policy is sought
1572 * @addr: address in @vma for shared policy lookup
1581 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, get_vma_policy() argument
1584 struct mempolicy *pol = __get_vma_policy(vma, addr); get_vma_policy()
1592 bool vma_policy_mof(struct vm_area_struct *vma) vma_policy_mof() argument
1596 if (vma->vm_ops && vma->vm_ops->get_policy) { vma_policy_mof()
1599 pol = vma->vm_ops->get_policy(vma, vma->vm_start); vma_policy_mof()
1607 pol = vma->vm_policy; vma_policy_mof()
1738 struct vm_area_struct *vma, unsigned long off) offset_il_node()
1758 struct vm_area_struct *vma, unsigned long addr, int shift) interleave_nid()
1760 if (vma) { interleave_nid()
1771 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); interleave_nid()
1772 off += (addr - vma->vm_start) >> shift; interleave_nid()
1773 return offset_il_node(pol, vma, off); interleave_nid()
1795 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1796 * @vma: virtual memory area whose policy is sought
1797 * @addr: address in @vma for shared policy lookup and interleave policy
1809 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, huge_zonelist() argument
1815 *mpol = get_vma_policy(vma, addr); huge_zonelist()
1819 zl = node_zonelist(interleave_nid(*mpol, vma, addr, huge_zonelist()
1820 huge_page_shift(hstate_vma(vma))), gfp_flags); huge_zonelist()
1949 * @vma: Pointer to VMA or NULL if not available.
1962 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, alloc_pages_vma() argument
1972 pol = get_vma_policy(vma, addr); alloc_pages_vma()
1978 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); alloc_pages_vma()
2237 * @vma: vm area where page mapped
2240 * Lookup current policy node id for vma,addr and "compare to" page's
2248 * Called from fault path where we know the vma and faulting address.
2250 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) mpol_misplaced() argument
2261 BUG_ON(!vma); mpol_misplaced()
2263 pol = get_vma_policy(vma, addr); mpol_misplaced()
2269 BUG_ON(addr >= vma->vm_end); mpol_misplaced()
2270 BUG_ON(addr < vma->vm_start); mpol_misplaced()
2272 pgoff = vma->vm_pgoff; mpol_misplaced()
2273 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; mpol_misplaced()
2274 polnid = offset_il_node(pol, vma, pgoff); mpol_misplaced()
2457 /* Create pseudo-vma that contains just the policy */ mpol_shared_policy_init()
2472 struct vm_area_struct *vma, struct mempolicy *npol) mpol_set_shared_policy()
2476 unsigned long sz = vma_pages(vma); mpol_set_shared_policy()
2479 vma->vm_pgoff, mpol_set_shared_policy()
2485 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); mpol_set_shared_policy()
2489 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); mpol_set_shared_policy()
1737 offset_il_node(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long off) offset_il_node() argument
1757 interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) interleave_nid() argument
2471 mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) mpol_set_shared_policy() argument
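
The offset_il_node()/interleave_nid() hits above (mempolicy.c lines 1737-1773 in this listing) show how an interleave policy turns a vma plus a faulting address into a node index: the vma's file offset in pages, scaled by the mapping's page-size shift, plus the distance of the address from vm_start. The sketch below checks just that index arithmetic in isolation; nr_nodes and the modulo step are stand-ins for the kernel's walk of the policy's node mask, so this is an illustration, not the real offset_il_node().

	/* Illustration of the interleave index computation shown above.
	 * nr_nodes and the modulo are stand-ins for walking pol->v.nodes. */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	struct fake_vma {                 /* only the fields the computation uses */
		unsigned long vm_start;
		unsigned long vm_pgoff;   /* file offset of the mapping, in pages */
	};

	static unsigned interleave_index(const struct fake_vma *vma, unsigned long addr,
					 int shift, unsigned nr_nodes)
	{
		unsigned long off = vma->vm_pgoff >> (shift - PAGE_SHIFT);

		off += (addr - vma->vm_start) >> shift;
		return off % nr_nodes;    /* pick the off-th node of the policy's mask */
	}

	int main(void)
	{
		struct fake_vma vma = { .vm_start = 0x700000000000UL, .vm_pgoff = 16 };
		unsigned long addr = vma.vm_start + 5 * 4096;

		/* 16 pages of file offset + 5 pages into the vma = 21 -> node 1 of 4 */
		printf("interleave index -> node %u\n",
		       interleave_index(&vma, addr, PAGE_SHIFT, 4));
		return 0;
	}
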
H A Dhugetlb.c211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) subpool_vma() argument
213 return subpool_inode(file_inode(vma->vm_file)); subpool_vma()
397 * Convert the address within this vma to the page offset within
401 struct vm_area_struct *vma, unsigned long address) vma_hugecache_offset()
403 return ((address - vma->vm_start) >> huge_page_shift(h)) + vma_hugecache_offset()
404 (vma->vm_pgoff >> huge_page_order(h)); vma_hugecache_offset()
407 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, linear_hugepage_index() argument
410 return vma_hugecache_offset(hstate_vma(vma), vma, address); linear_hugepage_index()
417 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) vma_kernel_pagesize() argument
421 if (!is_vm_hugetlb_page(vma)) vma_kernel_pagesize()
424 hstate = hstate_vma(vma); vma_kernel_pagesize()
437 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) vma_mmu_pagesize() argument
439 return vma_kernel_pagesize(vma); vma_mmu_pagesize()
471 static unsigned long get_vma_private_data(struct vm_area_struct *vma) get_vma_private_data() argument
473 return (unsigned long)vma->vm_private_data; get_vma_private_data()
476 static void set_vma_private_data(struct vm_area_struct *vma, set_vma_private_data() argument
479 vma->vm_private_data = (void *)value; set_vma_private_data()
509 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) vma_resv_map() argument
511 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); vma_resv_map()
512 if (vma->vm_flags & VM_MAYSHARE) { vma_resv_map()
513 struct address_space *mapping = vma->vm_file->f_mapping; vma_resv_map()
519 return (struct resv_map *)(get_vma_private_data(vma) & vma_resv_map()
524 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) set_vma_resv_map() argument
526 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); set_vma_resv_map()
527 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_resv_map()
529 set_vma_private_data(vma, (get_vma_private_data(vma) & set_vma_resv_map()
533 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) set_vma_resv_flags() argument
535 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); set_vma_resv_flags()
536 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_resv_flags()
538 set_vma_private_data(vma, get_vma_private_data(vma) | flags); set_vma_resv_flags()
541 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) is_vma_resv_set() argument
543 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); is_vma_resv_set()
545 return (get_vma_private_data(vma) & flag) != 0; is_vma_resv_set()
549 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) reset_vma_resv_huge_pages() argument
551 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); reset_vma_resv_huge_pages()
552 if (!(vma->vm_flags & VM_MAYSHARE)) reset_vma_resv_huge_pages()
553 vma->vm_private_data = (void *)0; reset_vma_resv_huge_pages()
557 static int vma_has_reserves(struct vm_area_struct *vma, long chg) vma_has_reserves() argument
559 if (vma->vm_flags & VM_NORESERVE) { vma_has_reserves()
569 if (vma->vm_flags & VM_MAYSHARE && chg == 0) vma_has_reserves()
576 if (vma->vm_flags & VM_MAYSHARE) vma_has_reserves()
583 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) vma_has_reserves()
627 struct vm_area_struct *vma, dequeue_huge_page_vma()
644 if (!vma_has_reserves(vma, chg) && dequeue_huge_page_vma()
654 zonelist = huge_zonelist(vma, address, dequeue_huge_page_vma()
664 if (!vma_has_reserves(vma, chg)) dequeue_huge_page_vma()
1284 * This allocation function is useful in the context where vma is irrelevant.
1425 * Determine if the huge page at addr within the vma has an associated
1435 struct vm_area_struct *vma, unsigned long addr) vma_needs_reservation()
1441 resv = vma_resv_map(vma); vma_needs_reservation()
1445 idx = vma_hugecache_offset(h, vma, addr); vma_needs_reservation()
1448 if (vma->vm_flags & VM_MAYSHARE) vma_needs_reservation()
1454 struct vm_area_struct *vma, unsigned long addr) vma_commit_reservation()
1459 resv = vma_resv_map(vma); vma_commit_reservation()
1463 idx = vma_hugecache_offset(h, vma, addr); vma_commit_reservation()
1467 static struct page *alloc_huge_page(struct vm_area_struct *vma, alloc_huge_page() argument
1470 struct hugepage_subpool *spool = subpool_vma(vma); alloc_huge_page()
1471 struct hstate *h = hstate_vma(vma); alloc_huge_page()
1486 chg = vma_needs_reservation(h, vma, addr); alloc_huge_page()
1498 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); alloc_huge_page()
1514 vma_commit_reservation(h, vma, addr); alloc_huge_page()
1530 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, alloc_huge_page_noerr() argument
1533 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); alloc_huge_page_noerr()
2541 static void hugetlb_vm_op_open(struct vm_area_struct *vma) hugetlb_vm_op_open() argument
2543 struct resv_map *resv = vma_resv_map(vma); hugetlb_vm_op_open()
2553 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_vm_op_open()
2557 static void hugetlb_vm_op_close(struct vm_area_struct *vma) hugetlb_vm_op_close() argument
2559 struct hstate *h = hstate_vma(vma); hugetlb_vm_op_close()
2560 struct resv_map *resv = vma_resv_map(vma); hugetlb_vm_op_close()
2561 struct hugepage_subpool *spool = subpool_vma(vma); hugetlb_vm_op_close()
2565 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_vm_op_close()
2568 start = vma_hugecache_offset(h, vma, vma->vm_start); hugetlb_vm_op_close()
2569 end = vma_hugecache_offset(h, vma, vma->vm_end); hugetlb_vm_op_close()
2591 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) hugetlb_vm_op_fault() argument
2603 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, make_huge_pte() argument
2610 vma->vm_page_prot))); make_huge_pte()
2613 vma->vm_page_prot)); make_huge_pte()
2617 entry = arch_make_huge_pte(entry, vma, page, writable); make_huge_pte()
2622 static void set_huge_ptep_writable(struct vm_area_struct *vma, set_huge_ptep_writable() argument
2628 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) set_huge_ptep_writable()
2629 update_mmu_cache(vma, address, ptep); set_huge_ptep_writable()
2659 struct vm_area_struct *vma) copy_hugetlb_page_range()
2665 struct hstate *h = hstate_vma(vma); copy_hugetlb_page_range()
2671 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; copy_hugetlb_page_range()
2673 mmun_start = vma->vm_start; copy_hugetlb_page_range()
2674 mmun_end = vma->vm_end; copy_hugetlb_page_range()
2678 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { copy_hugetlb_page_range()
2735 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, __unmap_hugepage_range() argument
2740 struct mm_struct *mm = vma->vm_mm; __unmap_hugepage_range()
2746 struct hstate *h = hstate_vma(vma); __unmap_hugepage_range()
2751 WARN_ON(!is_vm_hugetlb_page(vma)); __unmap_hugepage_range()
2755 tlb_start_vma(tlb, vma); __unmap_hugepage_range()
2796 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); __unmap_hugepage_range()
2831 tlb_end_vma(tlb, vma); __unmap_hugepage_range()
2835 struct vm_area_struct *vma, unsigned long start, __unmap_hugepage_range_final()
2838 __unmap_hugepage_range(tlb, vma, start, end, ref_page); __unmap_hugepage_range_final()
2842 * test will fail on a vma being torn down, and not grab a page table __unmap_hugepage_range_final()
2850 vma->vm_flags &= ~VM_MAYSHARE; __unmap_hugepage_range_final()
2853 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unmap_hugepage_range() argument
2859 mm = vma->vm_mm; unmap_hugepage_range()
2862 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); unmap_hugepage_range()
2872 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, unmap_ref_private() argument
2875 struct hstate *h = hstate_vma(vma); unmap_ref_private()
2885 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + unmap_ref_private()
2886 vma->vm_pgoff; unmap_ref_private()
2887 mapping = file_inode(vma->vm_file)->i_mapping; unmap_ref_private()
2897 if (iter_vma == vma) unmap_ref_private()
2928 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_cow() argument
2932 struct hstate *h = hstate_vma(vma); hugetlb_cow()
2944 page_move_anon_rmap(old_page, vma, address); hugetlb_cow()
2945 set_huge_ptep_writable(vma, address, ptep); hugetlb_cow()
2958 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && hugetlb_cow()
2969 new_page = alloc_huge_page(vma, address, outside_reserve); hugetlb_cow()
2982 unmap_ref_private(mm, vma, old_page, address); hugetlb_cow()
3005 if (unlikely(anon_vma_prepare(vma))) { hugetlb_cow()
3010 copy_user_huge_page(new_page, old_page, address, vma, hugetlb_cow()
3029 huge_ptep_clear_flush(vma, address, ptep); hugetlb_cow()
3032 make_huge_pte(vma, new_page, 1)); hugetlb_cow()
3034 hugepage_add_new_anon_rmap(new_page, vma, address); hugetlb_cow()
3051 struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_page()
3056 mapping = vma->vm_file->f_mapping; hugetlbfs_pagecache_page()
3057 idx = vma_hugecache_offset(h, vma, address); hugetlbfs_pagecache_page()
3067 struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_present()
3073 mapping = vma->vm_file->f_mapping; hugetlbfs_pagecache_present()
3074 idx = vma_hugecache_offset(h, vma, address); hugetlbfs_pagecache_present()
3082 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_no_page() argument
3086 struct hstate *h = hstate_vma(vma); hugetlb_no_page()
3099 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { hugetlb_no_page()
3115 page = alloc_huge_page(vma, address, 0); hugetlb_no_page()
3128 if (vma->vm_flags & VM_MAYSHARE) { hugetlb_no_page()
3146 if (unlikely(anon_vma_prepare(vma))) { hugetlb_no_page()
3171 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) hugetlb_no_page()
3172 if (vma_needs_reservation(h, vma, address) < 0) { hugetlb_no_page()
3189 hugepage_add_new_anon_rmap(page, vma, address); hugetlb_no_page()
3192 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) hugetlb_no_page()
3193 && (vma->vm_flags & VM_SHARED))); hugetlb_no_page()
3196 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { hugetlb_no_page()
3198 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); hugetlb_no_page()
3216 struct vm_area_struct *vma, fault_mutex_hash()
3223 if (vma->vm_flags & VM_SHARED) { fault_mutex_hash()
3241 struct vm_area_struct *vma, fault_mutex_hash()
3249 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, hugetlb_fault() argument
3259 struct hstate *h = hstate_vma(vma); hugetlb_fault()
3269 migration_entry_wait_huge(vma, mm, ptep); hugetlb_fault()
3280 mapping = vma->vm_file->f_mapping; hugetlb_fault()
3281 idx = vma_hugecache_offset(h, vma, address); hugetlb_fault()
3288 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); hugetlb_fault()
3293 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); hugetlb_fault()
3318 if (vma_needs_reservation(h, vma, address) < 0) { hugetlb_fault()
3323 if (!(vma->vm_flags & VM_MAYSHARE)) hugetlb_fault()
3325 vma, address); hugetlb_fault()
3350 ret = hugetlb_cow(mm, vma, address, ptep, entry, hugetlb_fault()
3357 if (huge_ptep_set_access_flags(vma, address, ptep, entry, hugetlb_fault()
3359 update_mmu_cache(vma, address, ptep); hugetlb_fault()
3385 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, follow_hugetlb_page() argument
3393 struct hstate *h = hstate_vma(vma); follow_hugetlb_page()
3395 while (vaddr < vma->vm_end && remainder) { follow_hugetlb_page()
3430 !hugetlbfs_pagecache_present(h, vma, vaddr)) { follow_hugetlb_page()
3454 ret = hugetlb_fault(mm, vma, vaddr, follow_hugetlb_page()
3472 vmas[i] = vma; follow_hugetlb_page()
3478 if (vaddr < vma->vm_end && remainder && follow_hugetlb_page()
3494 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, hugetlb_change_protection() argument
3497 struct mm_struct *mm = vma->vm_mm; hugetlb_change_protection()
3501 struct hstate *h = hstate_vma(vma); hugetlb_change_protection()
3505 flush_cache_range(vma, address, end); hugetlb_change_protection()
3508 i_mmap_lock_write(vma->vm_file->f_mapping); hugetlb_change_protection()
3542 pte = arch_make_huge_pte(pte, vma, NULL, 0); hugetlb_change_protection()
3554 flush_tlb_range(vma, start, end); hugetlb_change_protection()
3556 i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_change_protection()
3564 struct vm_area_struct *vma, hugetlb_reserve_pages()
3585 * called to make the mapping read-write. Assume !vma is a shm mapping hugetlb_reserve_pages()
3587 if (!vma || vma->vm_flags & VM_MAYSHARE) { hugetlb_reserve_pages()
3599 set_vma_resv_map(vma, resv_map); hugetlb_reserve_pages()
3600 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); hugetlb_reserve_pages()
3641 if (!vma || vma->vm_flags & VM_MAYSHARE) hugetlb_reserve_pages()
3645 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) hugetlb_reserve_pages()
3674 struct vm_area_struct *vma, page_table_shareable()
3683 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; page_table_shareable()
3698 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) vma_shareable() argument
3706 if (vma->vm_flags & VM_MAYSHARE && vma_shareable()
3707 vma->vm_start <= base && end <= vma->vm_end) vma_shareable()
3723 struct vm_area_struct *vma = find_vma(mm, addr); huge_pmd_share() local
3724 struct address_space *mapping = vma->vm_file->f_mapping; huge_pmd_share()
3725 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + huge_pmd_share()
3726 vma->vm_pgoff; huge_pmd_share()
3733 if (!vma_shareable(vma, addr)) huge_pmd_share()
3738 if (svma == vma) huge_pmd_share()
3741 saddr = page_table_shareable(svma, vma, addr, idx); huge_pmd_share()
3755 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); huge_pmd_share()
400 vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) vma_hugecache_offset() argument
626 dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) dequeue_huge_page_vma() argument
1434 vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) vma_needs_reservation() argument
1453 vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) vma_commit_reservation() argument
2658 copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) copy_hugetlb_page_range() argument
2834 __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) __unmap_hugepage_range_final() argument
3050 hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_page() argument
3066 hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) hugetlbfs_pagecache_present() argument
3215 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) fault_mutex_hash() argument
3240 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) fault_mutex_hash() argument
3562 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) hugetlb_reserve_pages() argument
3673 page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) page_table_shareable() argument
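
vma_hugecache_offset() (hugetlb.c line 401 above) converts an address inside a hugetlb vma into a page-cache index measured in huge pages: the distance from vm_start shifted down by the huge page shift, plus vm_pgoff converted from small-page to huge-page units. The arithmetic is easy to verify on its own; the sketch below assumes 2 MB huge pages and uses made-up constants rather than the kernel's struct hstate.

	/* Stand-alone check of the vma_hugecache_offset() arithmetic for 2 MB pages.
	 * HPAGE_SHIFT/HPAGE_ORDER are example constants, not read from a kernel header. */
	#include <stdio.h>

	#define PAGE_SHIFT  12
	#define HPAGE_SHIFT 21                          /* 2 MB huge page */
	#define HPAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)  /* 512 small pages per huge page */

	struct fake_vma {
		unsigned long vm_start;
		unsigned long vm_pgoff;                 /* in small (4 KB) pages */
	};

	static unsigned long hugecache_offset(const struct fake_vma *vma, unsigned long address)
	{
		return ((address - vma->vm_start) >> HPAGE_SHIFT) +
		       (vma->vm_pgoff >> HPAGE_ORDER);
	}

	int main(void)
	{
		/* mapping starts 4 MB (1024 small pages) into the file */
		struct fake_vma vma = { .vm_start = 0x20000000UL, .vm_pgoff = 1024 };
		unsigned long addr = vma.vm_start + 3 * (1UL << HPAGE_SHIFT);

		/* 2 huge pages of file offset + 3 huge pages into the vma = index 5 */
		printf("huge page index = %lu\n", hugecache_offset(&vma, addr));
		return 0;
	}
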
/linux-4.1.27/arch/mips/include/asm/
H A Dtlb.h5 * MIPS doesn't need any special per-pte or per-vma handling, except
8 #define tlb_start_vma(tlb, vma) \
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
13 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h11 * - flush_tlb_page(vma, vmaddr) flushes one page
12 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
29 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
39 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
42 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
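
The comment block at the top of the MIPS tlbflush.h hits spells out the per-vma flush contract: flush_tlb_page(vma, vmaddr) drops one translation, flush_tlb_range(vma, start, end) drops a range. The sketch below shows the usual calling shape on the arch-independent side, with the actual PTE rewrite reduced to a hypothetical helper; it is a schematic, not code from mm/.

	/* Schematic of the flush contract documented above. make_ptes_readonly()
	 * is a made-up placeholder for whatever PTE rewrite the caller performs. */
	static void change_and_flush(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
	{
		make_ptes_readonly(vma->vm_mm, start, end);	/* hypothetical PTE update */

		if (end - start == PAGE_SIZE)
			flush_tlb_page(vma, start);		/* one page changed */
		else
			flush_tlb_range(vma, start, end);	/* a whole range changed */
	}
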
H A Dhugetlb.h71 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
74 flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); huge_ptep_clear_flush()
94 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
102 set_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
107 flush_tlb_range(vma, addr, addr + HPAGE_SIZE); huge_ptep_set_access_flags()
/linux-4.1.27/arch/m68k/include/asm/
H A Dfb.h11 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
14 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; fb_pgprotect()
17 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
21 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; fb_pgprotect()
23 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; fb_pgprotect()
25 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; fb_pgprotect()
H A Dtlb.h6 * per-vma handling..
8 #define tlb_start_vma(tlb, vma) do { } while (0)
9 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush_no.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr) do { } while (0)
21 #define flush_icache_page(vma,pg) do { } while (0)
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
28 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlbflush.h84 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
86 if (vma->vm_mm == current->active_mm) { flush_tlb_page()
94 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
97 if (vma->vm_mm == current->active_mm) flush_tlb_range()
170 static inline void flush_tlb_page (struct vm_area_struct *vma, flush_tlb_page() argument
177 sun3_put_context(vma->vm_mm->context); flush_tlb_page()
191 static inline void flush_tlb_range (struct vm_area_struct *vma, flush_tlb_range() argument
194 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
260 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
H A Dpage_no.h18 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
/linux-4.1.27/arch/m32r/include/asm/
H A Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
21 #define flush_icache_page(vma,pg) _flush_cache_copyback_all()
22 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all()
27 #define flush_icache_page(vma,pg) smp_flush_cache_all()
28 #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all()
35 #define flush_cache_range(vma, start, end) do { } while (0)
36 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
42 #define flush_icache_page(vma,pg) _flush_cache_all()
43 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all()
49 #define flush_cache_range(vma, start, end) do { } while (0)
50 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
56 #define flush_icache_page(vma,pg) do { } while (0)
57 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
64 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
67 flush_icache_user_range(vma, page, vaddr, len); \
69 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlb.h6 * per-vma handling..
8 #define tlb_start_vma(tlb, vma) do { } while (0)
9 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h12 * - flush_tlb_page(vma, vmaddr) flushes one page
13 * - flush_tlb_range(vma, start, end) flushes a range of pages
27 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
28 #define flush_tlb_range(vma, start, end) \
29 local_flush_tlb_range(vma, start, end)
34 #define flush_tlb_page(vma, vmaddr) do { } while (0)
35 #define flush_tlb_range(vma, start, end) do { } while (0)
46 #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
47 #define flush_tlb_range(vma, start, end) \
48 smp_flush_tlb_range(vma, start, end)
/linux-4.1.27/arch/x86/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
13 prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK; fb_pgprotect()
15 pgprot_val(vma->vm_page_prot) = fb_pgprotect()
H A Dtlb.h4 #define tlb_start_vma(tlb, vma) do { } while (0)
5 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dhugetlb.h53 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
56 ptep_clear_flush(vma, addr, ptep); huge_ptep_clear_flush()
75 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
79 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
/linux-4.1.27/arch/score/include/asm/
H A Dtlb.h5 * SCORE doesn't need any special per-pte or per-vma handling, except
8 #define tlb_start_vma(tlb, vma) do {} while (0)
9 #define tlb_end_vma(tlb, vma) do {} while (0)
H A Dcacheflush.h9 extern void flush_cache_range(struct vm_area_struct *vma,
11 extern void flush_cache_page(struct vm_area_struct *vma,
28 static inline void flush_icache_page(struct vm_area_struct *vma, flush_icache_page() argument
31 if (vma->vm_flags & VM_EXEC) { flush_icache_page()
38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
44 if ((vma->vm_flags & VM_EXEC)) \
45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
H A Dtlbflush.h11 * - flush_tlb_page(vma, vmaddr) flushes one page
12 * - flush_tlb_range(vma, start, end) flushes a range of pages
17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
27 #define flush_tlb_range(vma, vmaddr, end) \
28 local_flush_tlb_range(vma, vmaddr, end)
31 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
/linux-4.1.27/arch/sh/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h9 * - flush_tlb_page(vma, vmaddr) flushes one page
10 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 extern void local_flush_tlb_range(struct vm_area_struct *vma,
18 extern void local_flush_tlb_page(struct vm_area_struct *vma,
30 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
40 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
43 #define flush_tlb_range(vma, start, end) \
44 local_flush_tlb_range(vma, start, end)
H A Dtlb.h69 * In the case of tlb vma handling, we can optimise these away in the
74 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_start_vma() argument
77 flush_cache_range(vma, vma->vm_start, vma->vm_end); tlb_start_vma()
81 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_end_vma() argument
84 flush_tlb_range(vma, tlb->start, tlb->end); tlb_end_vma()
122 static inline void tlb_wire_entry(struct vm_area_struct *vma , tlb_wire_entry()
136 #define tlb_start_vma(tlb, vma) do { } while (0)
137 #define tlb_end_vma(tlb, vma) do { } while (0)
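
tlb_start_vma()/tlb_end_vma() bracket the teardown of one vma inside an mmu_gather. The sh version above flushes the cache range on entry and the TLB range on exit when the gather is not a full-mm flush, while several other ports in this listing (x86, m68k, m32r among them) define both hooks as no-ops. On the generic side the pattern is roughly the sketch below; zap_one_pte() is a placeholder, not a real function.

	/* Rough shape of the per-vma bracketing in the unmap path; illustrative only. */
	static void unmap_one_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
	{
		unsigned long addr;

		tlb_start_vma(tlb, vma);		/* e.g. flush_cache_range() on sh/sparc */
		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
			zap_one_pte(tlb, vma, addr);	/* placeholder for the real teardown */
		tlb_end_vma(tlb, vma);			/* e.g. flush_tlb_range() on sh */
	}
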
H A Dcacheflush.h15 * - flush_cache_range(vma, start, end) flushes a range of pages
19 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
41 extern void flush_cache_page(struct vm_area_struct *vma, cache_noop()
43 extern void flush_cache_range(struct vm_area_struct *vma, cache_noop()
48 extern void flush_icache_page(struct vm_area_struct *vma, cache_noop()
53 struct vm_area_struct *vma; cache_noop() member in struct:flusher_data
60 static inline void flush_anon_page(struct vm_area_struct *vma, flush_anon_page() argument
81 extern void copy_to_user_page(struct vm_area_struct *vma,
85 extern void copy_from_user_page(struct vm_area_struct *vma,
/linux-4.1.27/arch/cris/include/asm/
H A Dtlb.h10 * per-vma handling..
12 #define tlb_start_vma(tlb, vma) do { } while (0)
13 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0)
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
20 #define flush_icache_page(vma,pg) do { } while (0)
21 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
25 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlbflush.h15 * - flush_tlb_page(vma, vmaddr) flushes one page
22 extern void __flush_tlb_page(struct vm_area_struct *vma,
29 static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) flush_tlb_range() argument
31 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
/linux-4.1.27/arch/arm/include/asm/
H A Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlb.h72 struct vm_area_struct *vma; member in struct:mmu_gather
89 * tlb->vma will be non-NULL.
92 * tlb->vma will be non-NULL. Additionally, page tables will be freed.
95 * tlb->vma will be NULL.
99 if (tlb->fullmm || !tlb->vma) tlb_flush()
102 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); tlb_flush()
157 tlb->vma = NULL; tlb_gather_mmu()
190 * In the case of tlb vma handling, we can optimise these away in the
195 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_start_vma() argument
198 flush_cache_range(vma, vma->vm_start, vma->vm_end); tlb_start_vma()
199 tlb->vma = vma; tlb_start_vma()
206 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_end_vma() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_uverbs.c71 static void ehca_mm_open(struct vm_area_struct *vma) ehca_mm_open() argument
73 u32 *count = (u32 *)vma->vm_private_data; ehca_mm_open()
75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", ehca_mm_open()
76 vma->vm_start, vma->vm_end); ehca_mm_open()
82 vma->vm_start, vma->vm_end); ehca_mm_open()
84 vma->vm_start, vma->vm_end, *count); ehca_mm_open()
87 static void ehca_mm_close(struct vm_area_struct *vma) ehca_mm_close() argument
89 u32 *count = (u32 *)vma->vm_private_data; ehca_mm_close()
91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx", ehca_mm_close()
92 vma->vm_start, vma->vm_end); ehca_mm_close()
97 vma->vm_start, vma->vm_end, *count); ehca_mm_close()
105 static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, ehca_mmap_fw() argument
111 vsize = vma->vm_end - vma->vm_start; ehca_mmap_fw()
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start); ehca_mmap_fw()
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ehca_mmap_fw()
121 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, ehca_mmap_fw()
122 vma->vm_page_prot); ehca_mmap_fw()
128 vma->vm_private_data = mm_count; ehca_mmap_fw()
130 vma->vm_ops = &vm_ops; ehca_mmap_fw()
135 static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue, ehca_mmap_queue() argument
142 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; ehca_mmap_queue()
143 start = vma->vm_start; ehca_mmap_queue()
147 ret = vm_insert_page(vma, start, page); ehca_mmap_queue()
154 vma->vm_private_data = mm_count; ehca_mmap_queue()
156 vma->vm_ops = &vm_ops; ehca_mmap_queue()
161 static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq, ehca_mmap_cq() argument
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa); ehca_mmap_cq()
180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue); ehca_mmap_cq()
198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, ehca_mmap_qp() argument
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); ehca_mmap_qp()
217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, ehca_mmap_qp()
229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, ehca_mmap_qp()
248 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ehca_mmap() argument
250 u64 fileoffset = vma->vm_pgoff; ehca_mmap()
272 ret = ehca_mmap_cq(vma, cq, rsrc_type); ehca_mmap()
294 ret = ehca_mmap_qp(vma, qp, rsrc_type); ehca_mmap()
/linux-4.1.27/include/asm-generic/
H A Dcacheflush.h14 #define flush_cache_range(vma, start, end) do { } while (0)
15 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
21 #define flush_icache_page(vma,pg) do { } while (0)
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
29 flush_icache_user_range(vma, page, vaddr, len); \
31 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dmm_hooks.h19 struct vm_area_struct *vma, arch_unmap()
25 struct vm_area_struct *vma) arch_bprm_mm_init()
18 arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start, unsigned long end) arch_unmap() argument
24 arch_bprm_mm_init(struct mm_struct *mm, struct vm_area_struct *vma) arch_bprm_mm_init() argument
H A Ddma-coherent.h13 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
29 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
/linux-4.1.27/drivers/xen/xenfs/
H A Dxenstored.c33 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) xsd_kva_mmap() argument
35 size_t size = vma->vm_end - vma->vm_start; xsd_kva_mmap()
37 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) xsd_kva_mmap()
40 if (remap_pfn_range(vma, vma->vm_start, xsd_kva_mmap()
42 size, vma->vm_page_prot)) xsd_kva_mmap()
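
xsd_kva_mmap() above is about as small as a useful mmap handler gets: validate the requested size and offset, then hand the whole range to remap_pfn_range() with the vma's page protection. The same shape fits any character device that exposes one page of memory; in the hedged sketch below, mydev_pfn is an assumed driver-private page frame number, and everything else is the stock pattern.

	/* Minimal single-page mmap handler for a hypothetical character device. */
	#include <linux/fs.h>
	#include <linux/mm.h>

	static unsigned long mydev_pfn;		/* assumed to be initialised by the driver */

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		size_t size = vma->vm_end - vma->vm_start;

		if (size > PAGE_SIZE || vma->vm_pgoff != 0)	/* exactly one page, offset 0 */
			return -EINVAL;

		if (remap_pfn_range(vma, vma->vm_start, mydev_pfn, size,
				    vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}
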
/linux-4.1.27/arch/nios2/include/asm/
H A Dtlb.h19 * NiosII doesn't need any special per-pte or per-vma handling, except
22 #define tlb_start_vma(tlb, vma) \
25 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
28 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dcacheflush.h26 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
39 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
42 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
H A Dtlbflush.h29 * - flush_tlb_page(vma, vmaddr) flushes one page
30 * - flush_tlb_range(vma, start, end) flushes a range of pages
35 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
40 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
/linux-4.1.27/arch/nios2/kernel/
H A Dsys_nios2.c24 struct vm_area_struct *vma; sys_cacheflush() local
41 vma = find_vma(current->mm, addr); sys_cacheflush()
42 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) sys_cacheflush()
45 flush_cache_range(vma, addr, addr + len); sys_cacheflush()
/linux-4.1.27/arch/frv/include/asm/
H A Dtlb.h13 * we don't need any special per-pte or per-vma handling...
15 #define tlb_start_vma(tlb, vma) do { } while (0)
16 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h42 #define flush_tlb_range(vma,start,end) \
45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
49 #define flush_tlb_page(vma,addr) \
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
66 #define flush_tlb_page(vma,addr) BUG()
H A Dcacheflush.h25 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); flush_icache_page()
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
99 flush_icache_user_range((vma), (page), (vaddr), (len)); \
102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.1.27/arch/um/include/asm/
H A Dtlbflush.h17 * - flush_tlb_page(vma, vmaddr) flushes one page
19 * - flush_tlb_range(vma, start, end) flushes a range of pages
24 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
26 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
/linux-4.1.27/arch/arc/kernel/
H A Darc_hostlink.c21 static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma) arc_hl_mmap() argument
23 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); arc_hl_mmap()
25 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, arc_hl_mmap()
26 vma->vm_end - vma->vm_start, arc_hl_mmap()
27 vma->vm_page_prot)) { arc_hl_mmap()
/linux-4.1.27/arch/alpha/include/asm/
H A Dcacheflush.h10 #define flush_cache_range(vma, start, end) do { } while (0)
11 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
50 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, flush_icache_user_range() argument
53 if (vma->vm_flags & VM_EXEC) { flush_icache_user_range()
54 struct mm_struct *mm = vma->vm_mm; flush_icache_user_range()
62 extern void flush_icache_user_range(struct vm_area_struct *vma,
67 #define flush_icache_page(vma, page) \
68 flush_icache_user_range((vma), (page), 0, 0)
70 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
72 flush_icache_user_range(vma, page, vaddr, len); \
74 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
H A Dtlb.h4 #define tlb_start_vma(tlb, vma) do { } while (0)
5 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h39 struct vm_area_struct *vma, ev4_flush_tlb_current_page()
43 if (vma->vm_flags & VM_EXEC) { ev4_flush_tlb_current_page()
52 struct vm_area_struct *vma, ev5_flush_tlb_current_page()
55 if (vma->vm_flags & VM_EXEC) ev5_flush_tlb_current_page()
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
119 struct mm_struct *mm = vma->vm_mm; flush_tlb_page()
122 flush_tlb_current_page(mm, vma, addr); flush_tlb_page()
130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, flush_tlb_range() argument
133 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
38 ev4_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) ev4_flush_tlb_current_page() argument
51 ev5_flush_tlb_current_page(struct mm_struct * mm, struct vm_area_struct *vma, unsigned long addr) ev5_flush_tlb_current_page() argument
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dbase.c30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) nvkm_vm_map_at() argument
32 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_at()
35 int big = vma->node->type != mmu->spg_shift; nvkm_vm_map_at()
36 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_at()
37 u32 bits = vma->node->type - 12; nvkm_vm_map_at()
56 mmu->map(vma, pgt, node, pte, len, phys, delta); nvkm_vm_map_at()
66 delta += (u64)len << vma->node->type; nvkm_vm_map_at()
74 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, nvkm_vm_map_sg_table() argument
77 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg_table()
79 int big = vma->node->type != mmu->spg_shift; nvkm_vm_map_sg_table()
80 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg_table()
81 u32 bits = vma->node->type - 12; nvkm_vm_map_sg_table()
82 u32 num = length >> vma->node->type; nvkm_vm_map_sg_table()
103 mmu->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
118 mmu->map_sg(vma, pgt, mem, pte, 1, &addr); nvkm_vm_map_sg_table()
132 nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length, nvkm_vm_map_sg() argument
135 struct nvkm_vm *vm = vma->vm; nvkm_vm_map_sg()
138 int big = vma->node->type != mmu->spg_shift; nvkm_vm_map_sg()
139 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_map_sg()
140 u32 bits = vma->node->type - 12; nvkm_vm_map_sg()
141 u32 num = length >> vma->node->type; nvkm_vm_map_sg()
155 mmu->map_sg(vma, pgt, mem, pte, len, list); nvkm_vm_map_sg()
170 nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node) nvkm_vm_map() argument
173 nvkm_vm_map_sg_table(vma, 0, node->size << 12, node); nvkm_vm_map()
176 nvkm_vm_map_sg(vma, 0, node->size << 12, node); nvkm_vm_map()
178 nvkm_vm_map_at(vma, 0, node); nvkm_vm_map()
182 nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length) nvkm_vm_unmap_at() argument
184 struct nvkm_vm *vm = vma->vm; nvkm_vm_unmap_at()
186 int big = vma->node->type != mmu->spg_shift; nvkm_vm_unmap_at()
187 u32 offset = vma->node->offset + (delta >> 12); nvkm_vm_unmap_at()
188 u32 bits = vma->node->type - 12; nvkm_vm_unmap_at()
189 u32 num = length >> vma->node->type; nvkm_vm_unmap_at()
217 nvkm_vm_unmap(struct nvkm_vma *vma) nvkm_vm_unmap() argument
219 nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); nvkm_vm_unmap()
288 struct nvkm_vma *vma) nvkm_vm_get()
298 &vma->node); nvkm_vm_get()
304 fpde = (vma->node->offset >> mmu->pgt_bits); nvkm_vm_get()
305 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; nvkm_vm_get()
309 int big = (vma->node->type != mmu->spg_shift); nvkm_vm_get()
316 ret = nvkm_vm_map_pgt(vm, pde, vma->node->type); nvkm_vm_get()
320 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_get()
327 vma->vm = NULL; nvkm_vm_get()
328 nvkm_vm_ref(vm, &vma->vm, NULL); nvkm_vm_get()
329 vma->offset = (u64)vma->node->offset << 12; nvkm_vm_get()
330 vma->access = access; nvkm_vm_get()
335 nvkm_vm_put(struct nvkm_vma *vma) nvkm_vm_put() argument
337 struct nvkm_vm *vm = vma->vm; nvkm_vm_put()
341 if (unlikely(vma->node == NULL)) nvkm_vm_put()
343 fpde = (vma->node->offset >> mmu->pgt_bits); nvkm_vm_put()
344 lpde = (vma->node->offset + vma->node->length - 1) >> mmu->pgt_bits; nvkm_vm_put()
347 nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->spg_shift, fpde, lpde); nvkm_vm_put()
348 nvkm_mm_free(&vm->mm, &vma->node); nvkm_vm_put()
351 nvkm_vm_ref(NULL, &vma->vm, NULL); nvkm_vm_put()
287 nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access, struct nvkm_vma *vma) nvkm_vm_get() argument
/linux-4.1.27/arch/blackfin/include/asm/
H A Dtlb.h10 #define tlb_start_vma(tlb, vma) do { } while (0)
11 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.1.27/drivers/xen/
H A Dprivcmd.c47 struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member in struct:mmap_mfn_state
206 struct vm_area_struct *vma = st->vma; mmap_mfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) mmap_mfn_range()
219 rc = xen_remap_domain_mfn_range(vma, mmap_mfn_range()
222 vma->vm_page_prot, mmap_mfn_range()
236 struct vm_area_struct *vma; privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; privcmd_ioctl_mmap()
270 state.va = vma->vm_start; privcmd_ioctl_mmap()
271 state.vma = vma; privcmd_ioctl_mmap()
291 struct vm_area_struct *vma; member in struct:mmap_batch_state
315 struct vm_area_struct *vma = st->vma; mmap_batch_fn() local
316 struct page **pages = vma->vm_private_data; mmap_batch_fn()
324 ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, mmap_batch_fn()
325 (int *)mfnp, st->vma->vm_page_prot, mmap_batch_fn()
392 * the vma with the page info to use later.
395 static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) alloc_empty_pages() argument
411 BUG_ON(vma->vm_private_data != NULL); alloc_empty_pages()
412 vma->vm_private_data = pages; alloc_empty_pages()
424 struct vm_area_struct *vma; privcmd_ioctl_mmap_batch() local
472 vma = find_vma(mm, m.addr); privcmd_ioctl_mmap_batch()
473 if (!vma || privcmd_ioctl_mmap_batch()
474 vma->vm_ops != &privcmd_vm_ops) { privcmd_ioctl_mmap_batch()
490 if (vma->vm_private_data == NULL) { privcmd_ioctl_mmap_batch()
491 if (m.addr != vma->vm_start || privcmd_ioctl_mmap_batch()
492 m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) { privcmd_ioctl_mmap_batch()
497 ret = alloc_empty_pages(vma, m.num); privcmd_ioctl_mmap_batch()
501 vma->vm_private_data = PRIV_VMA_LOCKED; privcmd_ioctl_mmap_batch()
503 if (m.addr < vma->vm_start || privcmd_ioctl_mmap_batch()
504 m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) { privcmd_ioctl_mmap_batch()
508 if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) { privcmd_ioctl_mmap_batch()
515 state.vma = vma; privcmd_ioctl_mmap_batch()
581 static void privcmd_close(struct vm_area_struct *vma) privcmd_close() argument
583 struct page **pages = vma->vm_private_data; privcmd_close()
584 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; privcmd_close()
590 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); privcmd_close()
599 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) privcmd_fault() argument
601 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", privcmd_fault()
602 vma, vma->vm_start, vma->vm_end, privcmd_fault()
613 static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) privcmd_mmap() argument
617 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY | privcmd_mmap()
619 vma->vm_ops = &privcmd_vm_ops; privcmd_mmap()
620 vma->vm_private_data = NULL; privcmd_mmap()
637 struct vm_area_struct *vma, privcmd_vma_range_is_mapped()
641 return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT, privcmd_vma_range_is_mapped()
636 privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages) privcmd_vma_range_is_mapped() argument
H A Dgntdev.c84 struct vm_area_struct *vma; member in struct:grant_map
241 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; find_grant_ptes()
397 static void gntdev_vma_open(struct vm_area_struct *vma) gntdev_vma_open() argument
399 struct grant_map *map = vma->vm_private_data; gntdev_vma_open()
401 pr_debug("gntdev_vma_open %p\n", vma); gntdev_vma_open()
405 static void gntdev_vma_close(struct vm_area_struct *vma) gntdev_vma_close() argument
407 struct grant_map *map = vma->vm_private_data; gntdev_vma_close()
408 struct file *file = vma->vm_file; gntdev_vma_close()
411 pr_debug("gntdev_vma_close %p\n", vma); gntdev_vma_close()
414 * concurrently, so take priv->lock to ensure that the vma won't gntdev_vma_close()
418 * closing the vma, but it may still iterate the unmap_ops list. gntdev_vma_close()
421 map->vma = NULL; gntdev_vma_close()
424 vma->vm_private_data = NULL; gntdev_vma_close()
428 static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma, gntdev_vma_find_special_page() argument
431 struct grant_map *map = vma->vm_private_data; gntdev_vma_find_special_page()
450 if (!map->vma) unmap_if_in_range()
452 if (map->vma->vm_start >= end) unmap_if_in_range()
454 if (map->vma->vm_end <= start) unmap_if_in_range()
456 mstart = max(start, map->vma->vm_start); unmap_if_in_range()
457 mend = min(end, map->vma->vm_end); unmap_if_in_range()
460 map->vma->vm_start, map->vma->vm_end, unmap_if_in_range()
463 (mstart - map->vma->vm_start) >> PAGE_SHIFT, unmap_if_in_range()
501 if (!map->vma) mn_release()
505 map->vma->vm_start, map->vma->vm_end); mn_release()
510 if (!map->vma) mn_release()
514 map->vma->vm_start, map->vma->vm_end); mn_release()
656 struct vm_area_struct *vma; gntdev_ioctl_get_offset_for_vaddr() local
665 vma = find_vma(current->mm, op.vaddr); gntdev_ioctl_get_offset_for_vaddr()
666 if (!vma || vma->vm_ops != &gntdev_vmops) gntdev_ioctl_get_offset_for_vaddr()
669 map = vma->vm_private_data; gntdev_ioctl_get_offset_for_vaddr()
778 static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) gntdev_mmap() argument
781 int index = vma->vm_pgoff; gntdev_mmap()
782 int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; gntdev_mmap()
786 if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) gntdev_mmap()
790 index, count, vma->vm_start, vma->vm_pgoff); gntdev_mmap()
796 if (use_ptemod && map->vma) gntdev_mmap()
798 if (use_ptemod && priv->mm != vma->vm_mm) { gntdev_mmap()
805 vma->vm_ops = &gntdev_vmops; gntdev_mmap()
807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; gntdev_mmap()
810 vma->vm_flags |= VM_DONTCOPY; gntdev_mmap()
812 vma->vm_private_data = map; gntdev_mmap()
815 map->vma = vma; gntdev_mmap()
818 if ((vma->vm_flags & VM_WRITE) && gntdev_mmap()
823 if (!(vma->vm_flags & VM_WRITE)) gntdev_mmap()
830 err = apply_to_page_range(vma->vm_mm, vma->vm_start, gntdev_mmap()
831 vma->vm_end - vma->vm_start, gntdev_mmap()
845 err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE, gntdev_mmap()
862 apply_to_page_range(vma->vm_mm, vma->vm_start, gntdev_mmap()
863 vma->vm_end - vma->vm_start, gntdev_mmap()
867 map->pages_vm_start = vma->vm_start; gntdev_mmap()
880 map->vma = NULL; gntdev_mmap()
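
privcmd_mmap() and gntdev_mmap() above follow the same recipe: mark the vma with VM_DONTEXPAND/VM_DONTDUMP (plus VM_IO or VM_PFNMAP as appropriate), install a vm_operations_struct so open/close/fault events reach the driver, and stash per-mapping state in vm_private_data. A stripped-down, hypothetical version of that wiring, using the 4.1-era fault prototype visible in privcmd_fault() above:

	/* Hedged sketch of the vm_ops wiring used by the Xen drivers above;
	 * the mydrv_* names are hypothetical and the fault handler simply refuses. */
	#include <linux/fs.h>
	#include <linux/mm.h>

	static void mydrv_vm_open(struct vm_area_struct *vma)
	{
		/* a real driver would take a reference on vma->vm_private_data here */
	}

	static void mydrv_vm_close(struct vm_area_struct *vma)
	{
		/* ...and drop that reference here */
	}

	static int mydrv_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		return VM_FAULT_SIGBUS;			/* nothing is demand-faulted */
	}

	static const struct vm_operations_struct mydrv_vm_ops = {
		.open	= mydrv_vm_open,
		.close	= mydrv_vm_close,
		.fault	= mydrv_vm_fault,
	};

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_ops = &mydrv_vm_ops;
		vma->vm_private_data = NULL;		/* per-mapping state would go here */
		return 0;
	}
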
H A Dxlate_mmu.c68 struct vm_area_struct *vma; member in struct:remap_data
88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); remap_pte_fn()
96 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, xen_xlate_remap_gfn_array() argument
109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); xen_xlate_remap_gfn_array()
114 data.vma = vma; xen_xlate_remap_gfn_array()
120 err = apply_to_page_range(vma->vm_mm, addr, range, xen_xlate_remap_gfn_array()
126 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, xen_xlate_unmap_gfn_range() argument
/linux-4.1.27/fs/proc/
H A Dtask_nommu.c20 struct vm_area_struct *vma; task_mem() local
27 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_mem()
29 bytes += kobjsize(vma); task_mem()
31 region = vma->vm_region; task_mem()
36 size = vma->vm_end - vma->vm_start; task_mem()
40 vma->vm_flags & VM_MAYSHARE) { task_mem()
45 slack = region->vm_end - vma->vm_end; task_mem()
82 struct vm_area_struct *vma; task_vsize() local
88 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_vsize()
89 vsize += vma->vm_end - vma->vm_start; task_vsize()
99 struct vm_area_struct *vma; task_statm() local
106 vma = rb_entry(p, struct vm_area_struct, vm_rb); task_statm()
107 size += kobjsize(vma); task_statm()
108 region = vma->vm_region; task_statm()
127 struct vm_area_struct *vma, bool is_pid) pid_of_stack()
136 task = task_of_stack(task, vma, is_pid); pid_of_stack()
148 static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, nommu_vma_show() argument
151 struct mm_struct *mm = vma->vm_mm; nommu_vma_show()
159 flags = vma->vm_flags; nommu_vma_show()
160 file = vma->vm_file; nommu_vma_show()
163 struct inode *inode = file_inode(vma->vm_file); nommu_vma_show()
166 pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; nommu_vma_show()
172 vma->vm_start, nommu_vma_show()
173 vma->vm_end, nommu_vma_show()
185 pid_t tid = pid_of_stack(priv, vma, is_pid); nommu_vma_show()
193 if (!is_pid || (vma->vm_start <= mm->start_stack && nommu_vma_show()
194 vma->vm_end >= mm->start_stack)) nommu_vma_show()
126 pid_of_stack(struct proc_maps_private *priv, struct vm_area_struct *vma, bool is_pid) pid_of_stack() argument
H A Dtask_mmu.c127 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) m_next_vma() argument
129 if (vma == priv->tail_vma) m_next_vma()
131 return vma->vm_next ?: priv->tail_vma; m_next_vma()
134 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) m_cache_vma() argument
136 if (m->count < m->size) /* vma is copied successfully */ m_cache_vma()
137 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; m_cache_vma()
145 struct vm_area_struct *vma; m_start() local
165 vma = find_vma(mm, last_addr); m_start()
166 if (vma && (vma = m_next_vma(priv, vma))) m_start()
167 return vma; m_start()
172 for (vma = mm->mmap; pos; pos--) { m_start()
173 m->version = vma->vm_start; m_start()
174 vma = vma->vm_next; m_start()
176 return vma; m_start()
250 struct vm_area_struct *vma, bool is_pid) pid_of_stack()
259 task = task_of_stack(task, vma, is_pid); pid_of_stack()
269 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) show_map_vma() argument
271 struct mm_struct *mm = vma->vm_mm; show_map_vma()
272 struct file *file = vma->vm_file; show_map_vma()
274 vm_flags_t flags = vma->vm_flags; show_map_vma()
282 struct inode *inode = file_inode(vma->vm_file); show_map_vma()
285 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; show_map_vma()
289 start = vma->vm_start; show_map_vma()
290 if (stack_guard_page_start(vma, start)) show_map_vma()
292 end = vma->vm_end; show_map_vma()
293 if (stack_guard_page_end(vma, end)) show_map_vma()
317 if (vma->vm_ops && vma->vm_ops->name) { show_map_vma()
318 name = vma->vm_ops->name(vma); show_map_vma()
323 name = arch_vma_name(vma); show_map_vma()
332 if (vma->vm_start <= mm->brk && show_map_vma()
333 vma->vm_end >= mm->start_brk) { show_map_vma()
338 tid = pid_of_stack(priv, vma, is_pid); show_map_vma()
344 if (!is_pid || (vma->vm_start <= mm->start_stack && show_map_vma()
345 vma->vm_end >= mm->start_stack)) { show_map_vma()
487 struct vm_area_struct *vma = walk->vma; smaps_pte_entry() local
491 page = vm_normal_page(vma, addr, *pte); smaps_pte_entry()
511 struct vm_area_struct *vma = walk->vma; smaps_pmd_entry() local
515 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); smaps_pmd_entry()
532 struct vm_area_struct *vma = walk->vma; smaps_pte_range() local
536 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { smaps_pte_range()
549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); smaps_pte_range()
557 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) show_smap_vma_flags() argument
605 if (vma->vm_flags & (1UL << i)) { show_smap_vma_flags()
615 struct vm_area_struct *vma = v; show_smap() local
619 .mm = vma->vm_mm, show_smap()
625 walk_page_vma(vma, &smaps_walk); show_smap()
627 show_map_vma(m, vma, is_pid); show_smap()
644 (vma->vm_end - vma->vm_start) >> 10, show_smap()
655 vma_kernel_pagesize(vma) >> 10, show_smap()
656 vma_mmu_pagesize(vma) >> 10, show_smap()
657 (vma->vm_flags & VM_LOCKED) ? show_smap()
660 show_smap_vma_flags(m, vma); show_smap()
661 m_cache_vma(m, vma); show_smap()
744 static inline void clear_soft_dirty(struct vm_area_struct *vma, clear_soft_dirty() argument
762 set_pte_at(vma->vm_mm, addr, pte, ptent); clear_soft_dirty()
765 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, clear_soft_dirty_pmd() argument
773 if (vma->vm_flags & VM_SOFTDIRTY) clear_soft_dirty_pmd()
774 vma->vm_flags &= ~VM_SOFTDIRTY; clear_soft_dirty_pmd()
776 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); clear_soft_dirty_pmd()
781 static inline void clear_soft_dirty(struct vm_area_struct *vma, clear_soft_dirty() argument
786 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, clear_soft_dirty_pmd() argument
796 struct vm_area_struct *vma = walk->vma; clear_refs_pte_range() local
801 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { clear_refs_pte_range()
803 clear_soft_dirty_pmd(vma, addr, pmd); clear_refs_pte_range()
810 pmdp_test_and_clear_young(vma, addr, pmd); clear_refs_pte_range()
820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); clear_refs_pte_range()
825 clear_soft_dirty(vma, addr, pte); clear_refs_pte_range()
832 page = vm_normal_page(vma, addr, ptent); clear_refs_pte_range()
837 ptep_test_and_clear_young(vma, addr, pte); clear_refs_pte_range()
849 struct vm_area_struct *vma = walk->vma; clear_refs_test_walk() local
851 if (vma->vm_flags & VM_PFNMAP) clear_refs_test_walk()
860 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) clear_refs_test_walk()
862 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) clear_refs_test_walk()
873 struct vm_area_struct *vma; clear_refs_write() local
925 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
926 if (!(vma->vm_flags & VM_SOFTDIRTY)) clear_refs_write()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_write()
931 vma->vm_flags &= ~VM_SOFTDIRTY; clear_refs_write()
932 vma_set_page_prot(vma); clear_refs_write()
1013 struct vm_area_struct *vma = find_vma(walk->mm, addr); pagemap_pte_hole() local
1018 if (vma) pagemap_pte_hole()
1019 hole_end = min(end, vma->vm_start); pagemap_pte_hole()
1029 if (!vma) pagemap_pte_hole()
1033 if (vma->vm_flags & VM_SOFTDIRTY) pagemap_pte_hole()
1035 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { pagemap_pte_hole()
1046 struct vm_area_struct *vma, unsigned long addr, pte_t pte) pte_to_pagemap_entry()
1055 page = vm_normal_page(vma, addr, pte); pte_to_pagemap_entry()
1069 if (vma->vm_flags & VM_SOFTDIRTY) pte_to_pagemap_entry()
1077 if ((vma->vm_flags & VM_SOFTDIRTY)) pte_to_pagemap_entry()
1108 struct vm_area_struct *vma = walk->vma; pagemap_pte_range() local
1114 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { pagemap_pte_range()
1117 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) pagemap_pte_range()
1141 * We can assume that @vma always points to a valid one and @end never pagemap_pte_range()
1142 * goes beyond vma->vm_end. pagemap_pte_range()
1148 pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); pagemap_pte_range()
1179 struct vm_area_struct *vma = walk->vma; pagemap_hugetlb_range() local
1184 if (vma->vm_flags & VM_SOFTDIRTY) pagemap_hugetlb_range()
1389 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, can_gather_numa_stats() argument
1398 page = vm_normal_page(vma, addr, pte); can_gather_numa_stats()
1416 struct vm_area_struct *vma = walk->vma; gather_pte_stats() local
1421 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { gather_pte_stats()
1425 page = can_gather_numa_stats(huge_pte, vma, addr); gather_pte_stats()
1437 struct page *page = can_gather_numa_stats(*pte, vma, addr); gather_pte_stats()
1480 struct vm_area_struct *vma = v; show_numa_map() local
1482 struct file *file = vma->vm_file; show_numa_map()
1483 struct mm_struct *mm = vma->vm_mm; show_numa_map()
1500 pol = __get_vma_policy(vma, vma->vm_start); show_numa_map()
1508 seq_printf(m, "%08lx %s", vma->vm_start, buffer); show_numa_map()
1513 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { show_numa_map()
1516 pid_t tid = pid_of_stack(proc_priv, vma, is_pid); show_numa_map()
1522 if (!is_pid || (vma->vm_start <= mm->start_stack && show_numa_map()
1523 vma->vm_end >= mm->start_stack)) show_numa_map()
1530 if (is_vm_hugetlb_page(vma)) show_numa_map()
1534 walk_page_vma(vma, &walk); show_numa_map()
1554 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) show_numa_map()
1564 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); show_numa_map()
1567 m_cache_vma(m, vma); show_numa_map()
249 pid_of_stack(struct proc_maps_private *priv, struct vm_area_struct *vma, bool is_pid) pid_of_stack() argument
1045 pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, struct vm_area_struct *vma, unsigned long addr, pte_t pte) pte_to_pagemap_entry() argument
/linux-4.1.27/drivers/sbus/char/
H A Dflash.c36 flash_mmap(struct file *file, struct vm_area_struct *vma) flash_mmap() argument
46 if ((vma->vm_flags & VM_READ) && flash_mmap()
47 (vma->vm_flags & VM_WRITE)) { flash_mmap()
51 if (vma->vm_flags & VM_READ) { flash_mmap()
54 } else if (vma->vm_flags & VM_WRITE) { flash_mmap()
64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); flash_mmap()
71 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); flash_mmap()
73 if (io_remap_pfn_range(vma, vma->vm_start, addr, size, vma->vm_page_prot)) flash_mmap()
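[Editor's sketch] The flash.c hits above show the usual character-device mmap shape: validate vma->vm_pgoff and the requested length against the device size, mark the pages uncached, and hand the physical range to io_remap_pfn_range(). A minimal illustration of that pattern follows; demo_mmap, demo_phys_base and demo_size are hypothetical names, not identifiers from the indexed sources.

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Illustrative values only; a real driver reads these from its resources. */
    static unsigned long demo_phys_base;   /* physical base of the region */
    static unsigned long demo_size;        /* region size in bytes */

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
            unsigned long len = vma->vm_end - vma->vm_start;

            /* Refuse mappings that run past the end of the device region. */
            if (off > demo_size || len > demo_size - off)
                    return -EINVAL;

            /* Device memory: disable caching, as flash_mmap() does above. */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            /* Map the physical pages straight into the caller's address space. */
            if (io_remap_pfn_range(vma, vma->vm_start,
                                   (demo_phys_base + off) >> PAGE_SHIFT,
                                   len, vma->vm_page_prot))
                    return -EAGAIN;

            return 0;
    }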
/linux-4.1.27/arch/alpha/kernel/
H A Dpci-sysfs.c18 struct vm_area_struct *vma, hose_mmap_page_range()
28 vma->vm_pgoff += base >> PAGE_SHIFT; hose_mmap_page_range()
30 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, hose_mmap_page_range()
31 vma->vm_end - vma->vm_start, hose_mmap_page_range()
32 vma->vm_page_prot); hose_mmap_page_range()
36 struct vm_area_struct *vma, int sparse) __pci_mmap_fits()
41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __pci_mmap_fits()
42 start = vma->vm_pgoff; __pci_mmap_fits()
58 * @vma: struct vm_area_struct passed into the mmap
65 struct vm_area_struct *vma, int sparse) pci_mmap_resource()
80 if (!__pci_mmap_fits(pdev, i, vma, sparse)) pci_mmap_resource()
87 vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); pci_mmap_resource()
90 return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); pci_mmap_resource()
95 struct vm_area_struct *vma) pci_mmap_resource_sparse()
97 return pci_mmap_resource(kobj, attr, vma, 1); pci_mmap_resource_sparse()
102 struct vm_area_struct *vma) pci_mmap_resource_dense()
104 return pci_mmap_resource(kobj, attr, vma, 0); pci_mmap_resource_dense()
253 struct vm_area_struct *vma, __legacy_mmap_fits()
258 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; __legacy_mmap_fits()
259 start = vma->vm_pgoff; __legacy_mmap_fits()
282 int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, pci_mmap_legacy_page_range() argument
291 if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) pci_mmap_legacy_page_range()
294 return hose_mmap_page_range(hose, vma, mmap_type, sparse); pci_mmap_legacy_page_range()
17 hose_mmap_page_range(struct pci_controller *hose, struct vm_area_struct *vma, enum pci_mmap_state mmap_type, int sparse) hose_mmap_page_range() argument
35 __pci_mmap_fits(struct pci_dev *pdev, int num, struct vm_area_struct *vma, int sparse) __pci_mmap_fits() argument
63 pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int sparse) pci_mmap_resource() argument
93 pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) pci_mmap_resource_sparse() argument
100 pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) pci_mmap_resource_dense() argument
252 __legacy_mmap_fits(struct pci_controller *hose, struct vm_area_struct *vma, unsigned long res_size, int sparse) __legacy_mmap_fits() argument
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_bo_vm.c45 struct vm_area_struct *vma, ttm_bo_vm_fault_idle()
69 up_read(&vma->vm_mm->mmap_sem); ttm_bo_vm_fault_idle()
86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ttm_bo_vm_fault() argument
89 vma->vm_private_data; ttm_bo_vm_fault()
117 up_read(&vma->vm_mm->mmap_sem); ttm_bo_vm_fault()
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); ttm_bo_vm_fault()
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + ttm_bo_vm_fault()
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); ttm_bo_vm_fault()
179 page_last = vma_pages(vma) + vma->vm_pgoff - ttm_bo_vm_fault()
188 * Make a local vma copy to modify the page_prot member ttm_bo_vm_fault()
189 * and vm_flags if necessary. The vma parameter is protected ttm_bo_vm_fault()
192 cvma = *vma; ttm_bo_vm_fault()
225 page->mapping = vma->vm_file->f_mapping; ttm_bo_vm_fault()
231 if (vma->vm_flags & VM_MIXEDMAP) ttm_bo_vm_fault()
260 static void ttm_bo_vm_open(struct vm_area_struct *vma) ttm_bo_vm_open() argument
263 (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_vm_open()
265 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); ttm_bo_vm_open()
270 static void ttm_bo_vm_close(struct vm_area_struct *vma) ttm_bo_vm_close() argument
272 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_vm_close()
275 vma->vm_private_data = NULL; ttm_bo_vm_close()
308 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ttm_bo_mmap() argument
315 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); ttm_bo_mmap()
328 vma->vm_ops = &ttm_bo_vm_ops; ttm_bo_mmap()
332 * vma->vm_private_data here. ttm_bo_mmap()
335 vma->vm_private_data = bo; ttm_bo_mmap()
339 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, ttm_bo_mmap()
344 vma->vm_flags |= VM_MIXEDMAP; ttm_bo_mmap()
345 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; ttm_bo_mmap()
353 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) ttm_fbdev_mmap() argument
355 if (vma->vm_pgoff != 0) ttm_fbdev_mmap()
358 vma->vm_ops = &ttm_bo_vm_ops; ttm_fbdev_mmap()
359 vma->vm_private_data = ttm_bo_reference(bo); ttm_fbdev_mmap()
360 vma->vm_flags |= VM_MIXEDMAP; ttm_fbdev_mmap()
361 vma->vm_flags |= VM_IO | VM_DONTEXPAND; ttm_fbdev_mmap()
44 ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct vm_area_struct *vma, struct vm_fault *vmf) ttm_bo_vm_fault_idle() argument
/linux-4.1.27/arch/cris/arch-v32/drivers/pci/
H A Dbios.c17 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, pci_mmap_page_range() argument
25 prot = pgprot_val(vma->vm_page_prot); pci_mmap_page_range()
26 vma->vm_page_prot = __pgprot(prot); pci_mmap_page_range()
31 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_page_range()
32 vma->vm_end - vma->vm_start, pci_mmap_page_range()
33 vma->vm_page_prot)) pci_mmap_page_range()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dllite_mmap.c57 struct vm_area_struct *vma, unsigned long addr, policy_from_vma()
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + policy_from_vma()
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); policy_from_vma()
69 struct vm_area_struct *vma, *ret = NULL; our_vma() local
74 for (vma = find_vma(mm, addr); our_vma()
75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { our_vma()
76 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && our_vma()
77 vma->vm_flags & VM_SHARED) { our_vma()
78 ret = vma; our_vma()
87 * \param vma - virtual memory area addressed to page fault
 91  * \param ra_flags - vma readahead flags.
98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, ll_fault_io_init() argument
102 struct file *file = vma->vm_file; ll_fault_io_init()
132 fio->ft_executable = vma->vm_flags&VM_EXEC; ll_fault_io_init()
140 *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ); ll_fault_io_init()
141 vma->vm_flags &= ~VM_SEQ_READ; ll_fault_io_init()
142 vma->vm_flags |= VM_RAND_READ; ll_fault_io_init()
144 CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags, ll_fault_io_init()
169 static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, ll_page_mkwrite0() argument
183 io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL); ll_page_mkwrite0()
197 vio->u.fault.ft_vma = vma; ll_page_mkwrite0()
216 struct inode *inode = file_inode(vma->vm_file); ll_page_mkwrite0()
286  * \param vma - virtual memory area struct related to the page fault ll_page_mkwrite0()
293 static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) ll_fault0() argument
304 io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags); ll_fault0()
311 vio->u.fault.ft_vma = vma; ll_fault0()
333 vma->vm_flags |= ra_flags; ll_fault0()
342 static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ll_fault() argument
355 result = ll_fault0(vma, vmf); ll_fault()
382 static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ll_page_mkwrite() argument
391 result = ll_page_mkwrite0(vma, vmf->page, &retry); ll_page_mkwrite()
396 file_inode(vma->vm_file)->i_ino); ll_page_mkwrite()
426 * we track the mapped vma count in ccc_object::cob_mmap_cnt.
428 static void ll_vm_open(struct vm_area_struct *vma) ll_vm_open() argument
430 struct inode *inode = file_inode(vma->vm_file); ll_vm_open()
433 LASSERT(vma->vm_file); ll_vm_open()
441 static void ll_vm_close(struct vm_area_struct *vma) ll_vm_close() argument
443 struct inode *inode = file_inode(vma->vm_file); ll_vm_close()
446 LASSERT(vma->vm_file); ll_vm_close()
474 int ll_file_mmap(struct file *file, struct vm_area_struct *vma) ll_file_mmap() argument
483 rc = generic_file_mmap(file, vma); ll_file_mmap()
485 vma->vm_ops = &ll_file_vm_ops; ll_file_mmap()
486 vma->vm_ops->open(vma); ll_file_mmap()
56 policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, unsigned long addr, size_t count) policy_from_vma() argument
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_gem.c57 struct vm_area_struct *vma) update_vm_cache_attr()
63 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); update_vm_cache_attr()
65 vma->vm_page_prot = update_vm_cache_attr()
66 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
68 vma->vm_page_prot = update_vm_cache_attr()
69 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
80 struct vm_area_struct *vma, exynos_drm_gem_map_buf()
107 return vm_insert_mixed(vma, f_vaddr, pfn); exynos_drm_gem_map_buf()
322 struct vm_area_struct *vma) exynos_drm_gem_mmap_buffer()
329 vma->vm_flags &= ~VM_PFNMAP; exynos_drm_gem_mmap_buffer()
330 vma->vm_pgoff = 0; exynos_drm_gem_mmap_buffer()
332 vm_size = vma->vm_end - vma->vm_start; exynos_drm_gem_mmap_buffer()
344 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages, exynos_drm_gem_mmap_buffer()
381 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma) exynos_gem_get_vma() argument
389 if (vma->vm_ops && vma->vm_ops->open) exynos_gem_get_vma()
390 vma->vm_ops->open(vma); exynos_gem_get_vma()
392 if (vma->vm_file) exynos_gem_get_vma()
393 get_file(vma->vm_file); exynos_gem_get_vma()
395 memcpy(vma_copy, vma, sizeof(*vma)); exynos_gem_get_vma()
404 void exynos_gem_put_vma(struct vm_area_struct *vma) exynos_gem_put_vma() argument
406 if (!vma) exynos_gem_put_vma()
409 if (vma->vm_ops && vma->vm_ops->close) exynos_gem_put_vma()
410 vma->vm_ops->close(vma); exynos_gem_put_vma()
412 if (vma->vm_file) exynos_gem_put_vma()
413 fput(vma->vm_file); exynos_gem_put_vma()
415 kfree(vma); exynos_gem_put_vma()
421 struct vm_area_struct *vma) exynos_gem_get_pages_from_userptr()
426 if (vma_is_io(vma)) { exynos_gem_get_pages_from_userptr()
431 int ret = follow_pfn(vma, start, &pfn); exynos_gem_get_pages_from_userptr()
461 struct vm_area_struct *vma) exynos_gem_put_pages_to_userptr()
463 if (!vma_is_io(vma)) { exynos_gem_put_pages_to_userptr()
595 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) exynos_drm_gem_fault() argument
597 struct drm_gem_object *obj = vma->vm_private_data; exynos_drm_gem_fault()
604 vma->vm_start) >> PAGE_SHIFT; exynos_drm_gem_fault()
609 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset); exynos_drm_gem_fault()
618 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) exynos_drm_gem_mmap() argument
625 ret = drm_gem_mmap(filp, vma); exynos_drm_gem_mmap()
631 obj = vma->vm_private_data; exynos_drm_gem_mmap()
638 update_vm_cache_attr(exynos_gem_obj, vma); exynos_drm_gem_mmap()
640 ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma); exynos_drm_gem_mmap()
647 drm_gem_vm_close(vma); exynos_drm_gem_mmap()
56 update_vm_cache_attr(struct exynos_drm_gem_obj *obj, struct vm_area_struct *vma) update_vm_cache_attr() argument
79 exynos_drm_gem_map_buf(struct drm_gem_object *obj, struct vm_area_struct *vma, unsigned long f_vaddr, pgoff_t page_offset) exynos_drm_gem_map_buf() argument
321 exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) exynos_drm_gem_mmap_buffer() argument
418 exynos_gem_get_pages_from_userptr(unsigned long start, unsigned int npages, struct page **pages, struct vm_area_struct *vma) exynos_gem_get_pages_from_userptr() argument
459 exynos_gem_put_pages_to_userptr(struct page **pages, unsigned int npages, struct vm_area_struct *vma) exynos_gem_put_pages_to_userptr() argument
/linux-4.1.27/drivers/gpu/drm/udl/
H A Dudl_gem.c61 struct vm_area_struct *vma) update_vm_cache_attr()
67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); update_vm_cache_attr()
69 vma->vm_page_prot = update_vm_cache_attr()
70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
72 vma->vm_page_prot = update_vm_cache_attr()
73 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); update_vm_cache_attr()
87 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) udl_drm_gem_mmap() argument
91 ret = drm_gem_mmap(filp, vma); udl_drm_gem_mmap()
95 vma->vm_flags &= ~VM_PFNMAP; udl_drm_gem_mmap()
96 vma->vm_flags |= VM_MIXEDMAP; udl_drm_gem_mmap()
98 update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma); udl_drm_gem_mmap()
103 int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) udl_gem_fault() argument
105 struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data); udl_gem_fault()
110 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> udl_gem_fault()
117 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page); udl_gem_fault()
60 update_vm_cache_attr(struct udl_gem_object *obj, struct vm_area_struct *vma) update_vm_cache_attr() argument
/linux-4.1.27/arch/metag/mm/
H A Dhugetlbpage.c35 struct vm_area_struct *vma; prepare_hugepage_range() local
44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); prepare_hugepage_range()
45 if (vma && !(vma->vm_flags & MAP_HUGETLB)) prepare_hugepage_range()
48 vma = find_vma(mm, addr); prepare_hugepage_range()
49 if (vma) { prepare_hugepage_range()
50 if (addr + len > vma->vm_start) prepare_hugepage_range()
52 if (!(vma->vm_flags & MAP_HUGETLB) && prepare_hugepage_range()
53 (ALIGN_HUGEPT(addr + len) > vma->vm_start)) prepare_hugepage_range()
116 * Look for an unmapped area starting after another hugetlb vma.
131 struct vm_area_struct *vma; hugetlb_get_unmapped_area_existing() local
145 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { hugetlb_get_unmapped_area_existing()
146 if ((!vma && !after_huge) || TASK_SIZE - len < addr) { hugetlb_get_unmapped_area_existing()
158 if (vma && vma->vm_end <= addr) hugetlb_get_unmapped_area_existing()
160 /* space before the next vma? */ hugetlb_get_unmapped_area_existing()
161 if (after_huge && (!vma || ALIGN_HUGEPT(addr + len) hugetlb_get_unmapped_area_existing()
162 <= vma->vm_start)) { hugetlb_get_unmapped_area_existing()
170 if (vma->vm_flags & MAP_HUGETLB) { hugetlb_get_unmapped_area_existing()
171 /* space after a huge vma in 2nd level page table? */ hugetlb_get_unmapped_area_existing()
172 if (vma->vm_end & HUGEPT_MASK) { hugetlb_get_unmapped_area_existing()
175 addr = vma->vm_end; hugetlb_get_unmapped_area_existing()
180 addr = ALIGN_HUGEPT(vma->vm_end); hugetlb_get_unmapped_area_existing()
224  * Look for an existing hugetlb vma with space after it (this is to hugetlb_get_unmapped_area()
H A Dfault.c53 struct vm_area_struct *vma, *prev_vma; do_page_fault() local
116 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault()
118 if (!vma || address < vma->vm_start) do_page_fault()
123 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
127 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) do_page_fault()
136 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
173 vma = prev_vma; do_page_fault()
174 if (vma && (expand_stack(vma, address) == 0)) do_page_fault()
/linux-4.1.27/arch/microblaze/include/asm/
H A Dtlb.h19 #define tlb_start_vma(tlb, vma) do { } while (0)
20 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h33 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
36 static inline void local_flush_tlb_range(struct vm_area_struct *vma, local_flush_tlb_range() argument
42 #define update_mmu_cache(vma, addr, ptep) do { } while (0)
62 #define flush_tlb_page(vma, addr) BUG()
H A Dcacheflush.h64 #define flush_icache_user_range(vma, pg, adr, len) flush_icache();
65 #define flush_icache_page(vma, pg) do { } while (0)
92 #define flush_cache_page(vma, vmaddr, pfn) \
97 #define flush_cache_range(vma, start, len) { \
103 #define flush_cache_range(vma, start, len) do { } while (0)
105 static inline void copy_to_user_page(struct vm_area_struct *vma, copy_to_user_page() argument
111 if (vma->vm_flags & VM_EXEC) { copy_to_user_page()
117 static inline void copy_from_user_page(struct vm_area_struct *vma, copy_from_user_page() argument
/linux-4.1.27/fs/ext2/
H A Dfile.c29 static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ext2_dax_fault() argument
31 return dax_fault(vma, vmf, ext2_get_block, NULL); ext2_dax_fault()
34 static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ext2_dax_mkwrite() argument
36 return dax_mkwrite(vma, vmf, ext2_get_block, NULL); ext2_dax_mkwrite()
45 static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) ext2_file_mmap() argument
48 return generic_file_mmap(file, vma); ext2_file_mmap()
51 vma->vm_ops = &ext2_dax_vm_ops; ext2_file_mmap()
52 vma->vm_flags |= VM_MIXEDMAP; ext2_file_mmap()
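[Editor's sketch] ext2_file_mmap() above (like ll_file_mmap() in the llite hits earlier) lets generic_file_mmap() do the common setup and then installs file-specific vm_operations on success. A hedged sketch of that wiring, assuming an ordinary page-cache backed file; demo_file_mmap, demo_fault and demo_file_vm_ops are illustrative names only.

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            /* Fall back to the generic page-cache fault path in this sketch. */
            return filemap_fault(vma, vmf);
    }

    static const struct vm_operations_struct demo_file_vm_ops = {
            .fault = demo_fault,
    };

    static int demo_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            int ret = generic_file_mmap(file, vma);

            if (!ret)
                    vma->vm_ops = &demo_file_vm_ops;  /* override the generic ops */
            return ret;
    }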
/linux-4.1.27/drivers/char/
H A Dmspec.c83 * structure is pointed to by the vma->vm_private_data field in the vma struct.
85 * This structure is shared by all vma's that are split off from the
86 * original vma when split_vma()'s are done.
145 mspec_open(struct vm_area_struct *vma) mspec_open() argument
149 vdata = vma->vm_private_data; mspec_open()
157 * belonging to all the vma's sharing this vma_data structure.
160 mspec_close(struct vm_area_struct *vma) mspec_close() argument
166 vdata = vma->vm_private_data; mspec_close()
200 mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mspec_fault() argument
205 struct vma_data *vdata = vma->vm_private_data; mspec_fault()
236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); mspec_fault()
250 * Called when mmapping the device. Initializes the vma with a fault handler
255 mspec_mmap(struct file *file, struct vm_area_struct *vma, mspec_mmap() argument
261 if (vma->vm_pgoff != 0) mspec_mmap()
264 if ((vma->vm_flags & VM_SHARED) == 0) mspec_mmap()
267 if ((vma->vm_flags & VM_WRITE) == 0) mspec_mmap()
270 pages = vma_pages(vma); mspec_mmap()
281 vdata->vm_start = vma->vm_start; mspec_mmap()
282 vdata->vm_end = vma->vm_end; mspec_mmap()
287 vma->vm_private_data = vdata; mspec_mmap()
289 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; mspec_mmap()
291 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mspec_mmap()
292 vma->vm_ops = &mspec_vm_ops; mspec_mmap()
298 fetchop_mmap(struct file *file, struct vm_area_struct *vma) fetchop_mmap() argument
300 return mspec_mmap(file, vma, MSPEC_FETCHOP); fetchop_mmap()
304 cached_mmap(struct file *file, struct vm_area_struct *vma) cached_mmap() argument
306 return mspec_mmap(file, vma, MSPEC_CACHED); cached_mmap()
310 uncached_mmap(struct file *file, struct vm_area_struct *vma) uncached_mmap() argument
312 return mspec_mmap(file, vma, MSPEC_UNCACHED); uncached_mmap()
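[Editor's sketch] The mspec.c comments above describe the lazy, fault-driven variant: mmap() only records per-mapping state in vma->vm_private_data, sets VM_IO/VM_PFNMAP, and installs vm_ops, while the pfn is inserted from the .fault handler. A reduced sketch of that shape, assuming the kernel 4.1-era fault signature; the demo_* names are hypothetical and the .close teardown that would free the state is omitted for brevity.

    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_vdata {
            unsigned long pfn_base;         /* first backing pfn, set at mmap time */
    };

    static int demo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct demo_vdata *vdata = vma->vm_private_data;
            int ret;

            /* Insert the pfn for the faulting page; PFNMAP mappings have no struct page. */
            ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
                                vdata->pfn_base + vmf->pgoff);
            if (ret && ret != -EBUSY)       /* -EBUSY: a racing fault already mapped it */
                    return VM_FAULT_SIGBUS;

            return VM_FAULT_NOPAGE;         /* pte is installed, nothing more to do */
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .fault = demo_vm_fault,
    };

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct demo_vdata *vdata = kzalloc(sizeof(*vdata), GFP_KERNEL);

            if (!vdata)
                    return -ENOMEM;
            vdata->pfn_base = 0x12345;      /* placeholder: a real driver derives this */

            vma->vm_private_data = vdata;
            vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
            vma->vm_ops = &demo_vm_ops;
            return 0;
    }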
H A Duv_mmtimer.c43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
142 * @vma: VMA to map the registers into
147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) uv_mmtimer_mmap() argument
151 if (vma->vm_end - vma->vm_start != PAGE_SIZE) uv_mmtimer_mmap()
154 if (vma->vm_flags & VM_WRITE) uv_mmtimer_mmap()
160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uv_mmtimer_mmap()
166 if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, uv_mmtimer_mmap()
167 PAGE_SIZE, vma->vm_page_prot)) { uv_mmtimer_mmap()
/linux-4.1.27/arch/tile/kernel/
H A Dtlb.c53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, flush_tlb_page_mm() argument
56 unsigned long size = vma_kernel_pagesize(vma); flush_tlb_page_mm()
57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; flush_tlb_page_mm()
62 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) flush_tlb_page() argument
64 flush_tlb_page_mm(vma, vma->vm_mm, va); flush_tlb_page()
68 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
71 unsigned long size = vma_kernel_pagesize(vma); flush_tlb_range()
72 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; flush_tlb_range()
/linux-4.1.27/arch/arm/kernel/
H A Dsmp_tlb.c156 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) flush_tlb_page() argument
160 ta.ta_vma = vma; flush_tlb_page()
162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, flush_tlb_page()
165 __flush_tlb_page(vma, uaddr); flush_tlb_page()
166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); flush_tlb_page()
180 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
185 ta.ta_vma = vma; flush_tlb_range()
188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, flush_tlb_range()
191 local_flush_tlb_range(vma, start, end); flush_tlb_range()
192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); flush_tlb_range()
/linux-4.1.27/arch/ia64/mm/
H A Dfault.c83 struct vm_area_struct *vma, *prev_vma; ia64_do_page_fault() local
106 * is no vma for region 5 addr's anyway, so skip getting the semaphore ia64_do_page_fault()
127 vma = find_vma_prev(mm, address, &prev_vma); ia64_do_page_fault()
128 if (!vma && !prev_vma ) ia64_do_page_fault()
132 * find_vma_prev() returns vma such that address < vma->vm_end or NULL ia64_do_page_fault()
134 * May find no vma, but could be that the last vm area is the ia64_do_page_fault()
136  * this case vma will be null, but prev_vma will be non-null ia64_do_page_fault()
138 if (( !vma && prev_vma ) || (address < vma->vm_start) ) ia64_do_page_fault()
151 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) ia64_do_page_fault()
154 if ((vma->vm_flags & mask) != mask) ia64_do_page_fault()
162 fault = handle_mm_fault(mm, vma, address, flags); ia64_do_page_fault()
207 if (!vma) ia64_do_page_fault()
209 if (!(vma->vm_flags & VM_GROWSDOWN)) ia64_do_page_fault()
211 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) ia64_do_page_fault()
214 if (expand_stack(vma, address)) ia64_do_page_fault()
217 vma = prev_vma; ia64_do_page_fault()
218 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) ia64_do_page_fault()
225 if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) ia64_do_page_fault()
227 if (expand_upwards(vma, address)) ia64_do_page_fault()
273 * Since we have no vma's for region 5, we might get here even if the address is ia64_do_page_fault()
/linux-4.1.27/arch/um/drivers/
H A Dmmapper_kern.c48 static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) mmapper_mmap() argument
53 if (vma->vm_pgoff != 0) mmapper_mmap()
56 size = vma->vm_end - vma->vm_start; mmapper_mmap()
64 if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, mmapper_mmap()
65 vma->vm_page_prot)) mmapper_mmap()
/linux-4.1.27/arch/unicore32/mm/
H A Dflush.c23 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
26 if (vma->vm_flags & VM_EXEC) flush_cache_range()
30 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, flush_cache_page() argument
35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
39 if (vma->vm_flags & VM_EXEC) { flush_ptrace_access()
53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
58 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
/linux-4.1.27/arch/parisc/kernel/
H A Dcache.c78 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) update_mmu_cache() argument
276 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, __flush_cache_page() argument
281 if (vma->vm_flags & VM_EXEC) __flush_cache_page()
491 struct vm_area_struct *vma; mm_total_size() local
494 for (vma = mm->mmap; vma; vma = vma->vm_next) mm_total_size()
495 usize += vma->vm_end - vma->vm_start; mm_total_size()
516 struct vm_area_struct *vma; flush_cache_mm() local
527 for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_cache_mm()
528 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); flush_cache_mm()
529 if ((vma->vm_flags & VM_EXEC) == 0) flush_cache_mm()
531 flush_user_icache_range_asm(vma->vm_start, vma->vm_end); flush_cache_mm()
537 for (vma = mm->mmap; vma; vma = vma->vm_next) { flush_cache_mm()
540 for (addr = vma->vm_start; addr < vma->vm_end; flush_cache_mm()
549 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); flush_cache_mm()
572 void flush_cache_range(struct vm_area_struct *vma, flush_cache_range() argument
578 BUG_ON(!vma->vm_mm->context); flush_cache_range()
585 if (vma->vm_mm->context == mfsp(3)) { flush_cache_range()
587 if (vma->vm_flags & VM_EXEC) flush_cache_range()
592 pgd = vma->vm_mm->pgd; flush_cache_range()
600 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); flush_cache_range()
605 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) flush_cache_page() argument
607 BUG_ON(!vma->vm_mm->context); flush_cache_page()
610 flush_tlb_page(vma, vmaddr); flush_cache_page()
611 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); flush_cache_page()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_evict.c37 mark_free(struct i915_vma *vma, struct list_head *unwind) mark_free() argument
39 if (vma->pin_count) mark_free()
42 if (WARN_ON(!list_empty(&vma->exec_list))) mark_free()
45 list_add(&vma->exec_list, unwind); mark_free()
46 return drm_mm_scan_add_block(&vma->node); mark_free()
64 * This function is used by the object/vma binding code.
80 struct i915_vma *vma; i915_gem_evict_something() local
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { i915_gem_evict_something()
120 if (mark_free(vma, &unwind_list)) i915_gem_evict_something()
128 list_for_each_entry(vma, &vm->active_list, mm_list) { i915_gem_evict_something()
129 if (mark_free(vma, &unwind_list)) i915_gem_evict_something()
136 vma = list_first_entry(&unwind_list, i915_gem_evict_something()
139 ret = drm_mm_scan_remove_block(&vma->node); i915_gem_evict_something()
142 list_del_init(&vma->exec_list); i915_gem_evict_something()
173 vma = list_first_entry(&unwind_list, i915_gem_evict_something()
176 if (drm_mm_scan_remove_block(&vma->node)) { i915_gem_evict_something()
177 list_move(&vma->exec_list, &eviction_list); i915_gem_evict_something()
178 drm_gem_object_reference(&vma->obj->base); i915_gem_evict_something()
181 list_del_init(&vma->exec_list); i915_gem_evict_something()
187 vma = list_first_entry(&eviction_list, i915_gem_evict_something()
191 obj = &vma->obj->base; i915_gem_evict_something()
192 list_del_init(&vma->exec_list); i915_gem_evict_something()
194 ret = i915_vma_unbind(vma); i915_gem_evict_something()
218 struct i915_vma *vma, *next; i915_gem_evict_vm() local
234 list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) i915_gem_evict_vm()
235 if (vma->pin_count == 0) i915_gem_evict_vm()
236 WARN_ON(i915_vma_unbind(vma)); i915_gem_evict_vm()
H A Di915_gem_execbuffer.c132 struct i915_vma *vma; eb_lookup_vmas() local
142 * lookup_or_create exists as an interface to get at the vma eb_lookup_vmas()
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); eb_lookup_vmas()
147 if (IS_ERR(vma)) { eb_lookup_vmas()
149 ret = PTR_ERR(vma); eb_lookup_vmas()
154 list_add_tail(&vma->exec_list, &eb->vmas); eb_lookup_vmas()
157 vma->exec_entry = &exec[i]; eb_lookup_vmas()
159 eb->lut[i] = vma; eb_lookup_vmas()
162 vma->exec_handle = handle; eb_lookup_vmas()
163 hlist_add_head(&vma->exec_node, eb_lookup_vmas()
200 struct i915_vma *vma; hlist_for_each() local
202 vma = hlist_entry(node, struct i915_vma, exec_node); hlist_for_each()
203 if (vma->exec_handle == handle) hlist_for_each()
204 return vma; hlist_for_each()
211 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) i915_gem_execbuffer_unreserve_vma() argument
214 struct drm_i915_gem_object *obj = vma->obj; i915_gem_execbuffer_unreserve_vma()
216 if (!drm_mm_node_allocated(&vma->node)) i915_gem_execbuffer_unreserve_vma()
219 entry = vma->exec_entry; i915_gem_execbuffer_unreserve_vma()
225 vma->pin_count--; i915_gem_execbuffer_unreserve_vma()
238 struct i915_vma *vma; eb_destroy() local
240 vma = list_first_entry(&eb->vmas, eb_destroy()
243 list_del_init(&vma->exec_list); eb_destroy()
244 i915_gem_execbuffer_unreserve_vma(vma); eb_destroy()
245 drm_gem_object_unreference(&vma->obj->base); eb_destroy()
492 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, i915_gem_execbuffer_relocate_vma() argument
498 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_relocate_vma()
517 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); i915_gem_execbuffer_relocate_vma()
538 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, i915_gem_execbuffer_relocate_vma_slow() argument
542 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_relocate_vma_slow()
546 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); i915_gem_execbuffer_relocate_vma_slow()
557 struct i915_vma *vma; i915_gem_execbuffer_relocate() local
568 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate()
569 ret = i915_gem_execbuffer_relocate_vma(vma, eb); i915_gem_execbuffer_relocate()
585 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, i915_gem_execbuffer_reserve_vma() argument
589 struct drm_i915_gem_object *obj = vma->obj; i915_gem_execbuffer_reserve_vma()
590 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; i915_gem_execbuffer_reserve_vma()
595 if (!drm_mm_node_allocated(&vma->node)) { i915_gem_execbuffer_reserve_vma()
604 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); i915_gem_execbuffer_reserve_vma()
607 ret = i915_gem_object_pin(obj, vma->vm, i915_gem_execbuffer_reserve_vma()
624 if (entry->offset != vma->node.start) { i915_gem_execbuffer_reserve_vma()
625 entry->offset = vma->node.start; i915_gem_execbuffer_reserve_vma()
638 need_reloc_mappable(struct i915_vma *vma) need_reloc_mappable() argument
640 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; need_reloc_mappable()
645 if (!i915_is_ggtt(vma->vm)) need_reloc_mappable()
649 if (HAS_LLC(vma->obj->base.dev)) need_reloc_mappable()
652 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU) need_reloc_mappable()
659 eb_vma_misplaced(struct i915_vma *vma) eb_vma_misplaced() argument
661 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; eb_vma_misplaced()
662 struct drm_i915_gem_object *obj = vma->obj; eb_vma_misplaced()
665 !i915_is_ggtt(vma->vm)); eb_vma_misplaced()
668 vma->node.start & (entry->alignment - 1)) eb_vma_misplaced()
672 vma->node.start < BATCH_OFFSET_BIAS) eb_vma_misplaced()
688 struct i915_vma *vma; i915_gem_execbuffer_reserve() local
703 vma = list_first_entry(vmas, struct i915_vma, exec_list); i915_gem_execbuffer_reserve()
704 obj = vma->obj; i915_gem_execbuffer_reserve()
705 entry = vma->exec_entry; i915_gem_execbuffer_reserve()
712 need_mappable = need_fence || need_reloc_mappable(vma); i915_gem_execbuffer_reserve()
716 list_move(&vma->exec_list, &ordered_vmas); i915_gem_execbuffer_reserve()
718 list_move_tail(&vma->exec_list, &ordered_vmas); i915_gem_execbuffer_reserve()
742 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
743 if (!drm_mm_node_allocated(&vma->node)) list_for_each_entry()
746 if (eb_vma_misplaced(vma)) list_for_each_entry()
747 ret = i915_vma_unbind(vma); list_for_each_entry()
749 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); list_for_each_entry()
755 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
756 if (drm_mm_node_allocated(&vma->node)) list_for_each_entry()
759 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); list_for_each_entry()
769 list_for_each_entry(vma, vmas, exec_list)
770 i915_gem_execbuffer_unreserve_vma(vma);
788 struct i915_vma *vma; i915_gem_execbuffer_relocate_slow() local
798 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); i915_gem_execbuffer_relocate_slow()
799 list_del_init(&vma->exec_list); i915_gem_execbuffer_relocate_slow()
800 i915_gem_execbuffer_unreserve_vma(vma); i915_gem_execbuffer_relocate_slow()
801 drm_gem_object_unreference(&vma->obj->base); i915_gem_execbuffer_relocate_slow()
874 list_for_each_entry(vma, &eb->vmas, exec_list) { i915_gem_execbuffer_relocate_slow()
875 int offset = vma->exec_entry - exec; i915_gem_execbuffer_relocate_slow()
876 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, i915_gem_execbuffer_relocate_slow()
898 struct i915_vma *vma; i915_gem_execbuffer_move_to_gpu() local
903 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
904 struct drm_i915_gem_object *obj = vma->obj; list_for_each_entry()
1020 struct i915_vma *vma; i915_gem_execbuffer_move_to_active() local
1022 list_for_each_entry(vma, vmas, exec_list) { list_for_each_entry()
1023 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; list_for_each_entry()
1024 struct drm_i915_gem_object *obj = vma->obj; list_for_each_entry()
1034 i915_vma_move_to_active(vma, ring); list_for_each_entry()
1147 struct i915_vma *vma; i915_gem_execbuffer_parse() local
1170 vma = i915_gem_obj_to_ggtt(shadow_batch_obj); i915_gem_execbuffer_parse()
1171 vma->exec_entry = shadow_exec_entry; i915_gem_execbuffer_parse()
1172 vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN; i915_gem_execbuffer_parse()
1174 list_add_tail(&vma->exec_list, &eb->vmas); i915_gem_execbuffer_parse()
1385 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); eb_get_batch() local
1396 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; eb_get_batch()
1398 return vma->obj; eb_get_batch()
1610 * batch vma for correctness. For less ugly and less fragility this i915_gem_do_execbuffer()
1611 * needs to be adjusted to also track the ggtt batch vma properly as i915_gem_do_execbuffer()
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_mmap.c64 static void ipath_vma_open(struct vm_area_struct *vma) ipath_vma_open() argument
66 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_open()
71 static void ipath_vma_close(struct vm_area_struct *vma) ipath_vma_close() argument
73 struct ipath_mmap_info *ip = vma->vm_private_data; ipath_vma_close()
86 * @vma: the VMA to be initialized
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) ipath_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; ipath_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; ipath_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); ipath_mmap()
118 vma->vm_ops = &ipath_vm_ops; ipath_mmap()
119 vma->vm_private_data = ip; ipath_mmap()
120 ipath_vma_open(vma); ipath_mmap()
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_mmap.c64 static void qib_vma_open(struct vm_area_struct *vma) qib_vma_open() argument
66 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_open()
71 static void qib_vma_close(struct vm_area_struct *vma) qib_vma_close() argument
73 struct qib_mmap_info *ip = vma->vm_private_data; qib_vma_close()
86 * @vma: the VMA to be initialized
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) qib_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; qib_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; qib_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); qib_mmap()
118 vma->vm_ops = &qib_vm_ops; qib_mmap()
119 vma->vm_private_data = ip; qib_mmap()
120 qib_vma_open(vma); qib_mmap()
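[Editor's sketch] ipath_mmap() and qib_mmap() above map a vmalloc_user() buffer into user space with remap_vmalloc_range() and use vm_ops .open/.close to refcount the backing object. A cut-down sketch of the same idea; demo_buf, demo_refcnt and the demo_* handlers are illustrative, and allocation/freeing of the buffer is not shown.

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <linux/atomic.h>

    static void *demo_buf;                          /* vmalloc_user() allocation */
    static atomic_t demo_refcnt = ATOMIC_INIT(0);

    static void demo_vma_open(struct vm_area_struct *vma)
    {
            atomic_inc(&demo_refcnt);       /* another mapping references the buffer */
    }

    static void demo_vma_close(struct vm_area_struct *vma)
    {
            atomic_dec(&demo_refcnt);       /* a real driver frees on the last put */
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .open   = demo_vma_open,
            .close  = demo_vma_close,
    };

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            int ret;

            /* The buffer must have been allocated with vmalloc_user() for this to work. */
            ret = remap_vmalloc_range(vma, demo_buf, 0);
            if (ret)
                    return ret;

            vma->vm_ops = &demo_vm_ops;
            demo_vma_open(vma);             /* mmap() itself does not call .open */
            return 0;
    }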
/linux-4.1.27/arch/powerpc/mm/
H A Dcopro_fault.c39 struct vm_area_struct *vma; copro_handle_mm_fault() local
51 vma = find_vma(mm, ea); copro_handle_mm_fault()
52 if (!vma) copro_handle_mm_fault()
55 if (ea < vma->vm_start) { copro_handle_mm_fault()
56 if (!(vma->vm_flags & VM_GROWSDOWN)) copro_handle_mm_fault()
58 if (expand_stack(vma, ea)) copro_handle_mm_fault()
64 if (!(vma->vm_flags & VM_WRITE)) copro_handle_mm_fault()
67 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) copro_handle_mm_fault()
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); copro_handle_mm_fault()
H A Dsubpage-prot.c137 struct vm_area_struct *vma = walk->vma; subpage_walk_pmd_entry() local
138 split_huge_page_pmd(vma, addr, pmd); subpage_walk_pmd_entry()
145 struct vm_area_struct *vma; subpage_mark_vma_nohuge() local
152 * We don't try too hard, we just mark all the vma in that range subpage_mark_vma_nohuge()
155 vma = find_vma(mm, addr); subpage_mark_vma_nohuge()
159 if (vma && ((addr + len) <= vma->vm_start)) subpage_mark_vma_nohuge()
162 while (vma) { subpage_mark_vma_nohuge()
163 if (vma->vm_start >= (addr + len)) subpage_mark_vma_nohuge()
165 vma->vm_flags |= VM_NOHUGEPAGE; subpage_mark_vma_nohuge()
166 walk_page_vma(vma, &subpage_proto_walk); subpage_mark_vma_nohuge()
167 vma = vma->vm_next; subpage_mark_vma_nohuge()
H A Dhugetlbpage-book3e.c78 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, book3e_hugetlb_preload() argument
94 mm = vma->vm_mm; book3e_hugetlb_preload()
101 psize = vma_mmu_pagesize(vma); book3e_hugetlb_preload()
147 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) flush_hugetlb_page() argument
149 struct hstate *hstate = hstate_file(vma->vm_file); flush_hugetlb_page()
152 __flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0); flush_hugetlb_page()
/linux-4.1.27/arch/mips/mm/
H A Dc-octeon.c61 * vma. If no vma is supplied, all cores are flushed.
63 * @vma: VMA to flush or NULL to flush all icaches.
65 static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) octeon_flush_icache_all_cores() argument
80 * If we have a vma structure, we only need to worry about octeon_flush_icache_all_cores()
83 if (vma) octeon_flush_icache_all_cores()
84 mask = *mm_cpumask(vma->vm_mm); octeon_flush_icache_all_cores()
138 struct vm_area_struct *vma; octeon_flush_cache_sigtramp() local
141 vma = find_vma(current->mm, addr); octeon_flush_cache_sigtramp()
142 octeon_flush_icache_all_cores(vma); octeon_flush_cache_sigtramp()
148 * Flush a range out of a vma
150 * @vma: VMA to flush
154 static void octeon_flush_cache_range(struct vm_area_struct *vma, octeon_flush_cache_range() argument
157 if (vma->vm_flags & VM_EXEC) octeon_flush_cache_range()
158 octeon_flush_icache_all_cores(vma); octeon_flush_cache_range()
163 * Flush a specific page of a vma
165 * @vma: VMA to flush page for
169 static void octeon_flush_cache_page(struct vm_area_struct *vma, octeon_flush_cache_page() argument
172 if (vma->vm_flags & VM_EXEC) octeon_flush_cache_page()
173 octeon_flush_icache_all_cores(vma); octeon_flush_cache_page()
H A Dtlb-r3k.c78 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
81 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
157 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
161 if (cpu_context(cpu, vma->vm_mm) != 0) { local_flush_tlb_page()
166 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); local_flush_tlb_page()
168 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; local_flush_tlb_page()
188 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) __update_tlb() argument
196 if (current->active_mm != vma->vm_mm) __update_tlb()
202 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { __update_tlb()
204 (cpu_context(cpu, vma->vm_mm)), pid); __update_tlb()
H A Dtlb-r8k.c61 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
64 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
148 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
155 if (!cpu_context(cpu, vma->vm_mm)) local_flush_tlb_page()
158 newpid = cpu_asid(cpu, vma->vm_mm); local_flush_tlb_page()
183 void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) __update_tlb() argument
194 if (current->active_mm != vma->vm_mm) __update_tlb()
203 pgdp = pgd_offset(vma->vm_mm, address); __update_tlb()
/linux-4.1.27/arch/c6x/include/asm/
H A Dcacheflush.h29 #define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
46 #define flush_icache_page(vma, page) \
48 if ((vma)->vm_flags & PROT_EXEC) \
56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.1.27/arch/tile/include/asm/
H A Dtlbflush.h41 /* Pass as vma pointer for non-executable mapping, if no vma available. */
45 static inline void local_flush_tlb_page(struct vm_area_struct *vma, local_flush_tlb_page() argument
53 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) local_flush_tlb_page()
58 static inline void local_flush_tlb_pages(struct vm_area_struct *vma, local_flush_tlb_pages() argument
67 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) local_flush_tlb_pages()
102 * - flush_tlb_page(vma, vmaddr) flushes one page
103 * - flush_tlb_range(vma, start, end) flushes a range of pages
H A Dtlb.h18 #define tlb_start_vma(tlb, vma) do { } while (0)
19 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dhugetlb.h67 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
70 ptep_clear_flush(vma, addr, ptep); huge_ptep_clear_flush()
89 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
93 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); huge_ptep_set_access_flags()
115 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, arch_make_huge_pte() argument
118 size_t pagesize = huge_page_size(hstate_vma(vma)); arch_make_huge_pte()
/linux-4.1.27/include/media/
H A Dvideobuf2-memops.h20 * vb2_vmarea_handler - common vma refcount tracking handler
36 struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma);
37 void vb2_put_vma(struct vm_area_struct *vma);
/linux-4.1.27/arch/mn10300/include/asm/
H A Dtlb.h20 * we don't need any special per-pte or per-vma handling...
22 #define tlb_start_vma(tlb, vma) do { } while (0)
23 #define tlb_end_vma(tlb, vma) do { } while (0)
H A Dtlbflush.h93 * - flush_tlb_page(vma, vmaddr) flushes one page
108 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
111 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
130 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
138 #define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
/linux-4.1.27/arch/openrisc/include/asm/
H A Dtlb.h24 * per-vma handling..
26 #define tlb_start_vma(tlb, vma) do { } while (0)
27 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.1.27/arch/arm64/include/asm/
H A Dfb.h23 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, fb_pgprotect() argument
26 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); fb_pgprotect()
H A Dtlbflush.h55 * flush_tlb_page(vaddr,vma)
59 * - vma - vma_struct describing address range
85 static inline void flush_tlb_page(struct vm_area_struct *vma, flush_tlb_page() argument
89 ((unsigned long)ASID(vma->vm_mm) << 48); flush_tlb_page()
96 static inline void __flush_tlb_range(struct vm_area_struct *vma, __flush_tlb_range() argument
99 unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; __flush_tlb_range()
129 static inline void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
133 __flush_tlb_range(vma, start, end); flush_tlb_range()
135 flush_tlb_mm(vma->vm_mm); flush_tlb_range()
162 static inline void update_mmu_cache(struct vm_area_struct *vma, update_mmu_cache() argument
172 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
/linux-4.1.27/arch/powerpc/kernel/
H A Dproc_powerpc.c44 static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) page_map_mmap()
46 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) page_map_mmap()
49 remap_pfn_range(vma, vma->vm_start, page_map_mmap()
51 PAGE_SIZE, vma->vm_page_prot); page_map_mmap()
/linux-4.1.27/drivers/staging/unisys/visorchipset/
H A Dfile.c61 visorchipset_mmap(struct file *file, struct vm_area_struct *vma) visorchipset_mmap() argument
64 ulong offset = vma->vm_pgoff << PAGE_SHIFT; visorchipset_mmap()
73 vma->vm_flags |= VM_IO; visorchipset_mmap()
85 if (remap_pfn_range(vma, vma->vm_start, visorchipset_mmap()
87 vma->vm_end - vma->vm_start, visorchipset_mmap()
89 (vma->vm_page_prot))) { visorchipset_mmap()
/linux-4.1.27/arch/sh/mm/
H A Dmmap.c37 struct vm_area_struct *vma; arch_get_unmapped_area() local
64 vma = find_vma(mm, addr); arch_get_unmapped_area()
66 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
84 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
114 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
116 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
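[Editor's sketch] Each arch_get_unmapped_area() variant above ends with the same test: a candidate address is usable when find_vma() returns no VMA, or when the next VMA starts at or after addr + len. A tiny helper expressing just that check; demo_range_is_free is a hypothetical name and the caller is assumed to hold mmap_sem for reading.

    #include <linux/mm.h>

    /* Return true if [addr, addr + len) overlaps no existing mapping in @mm. */
    static bool demo_range_is_free(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len)
    {
            /* find_vma() returns the first VMA with vm_end > addr, or NULL. */
            struct vm_area_struct *vma = find_vma(mm, addr);

            return !vma || addr + len <= vma->vm_start;
    }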
H A Dtlbflush_32.c15 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { local_flush_tlb_page()
39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
42 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
H A Dcache.c58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
73 if (vma->vm_flags & VM_EXEC) copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); copy_to_user_page()
77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
94 unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage()
112 (vma->vm_flags & VM_EXEC)) copy_user_highpage()
134 void __update_cache(struct vm_area_struct *vma, __update_cache() argument
191 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, flush_cache_page() argument
196 data.vma = vma; flush_cache_page()
203 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
208 data.vma = vma; flush_cache_range()
226 data.vma = NULL; flush_icache_range()
234 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
93 copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) copy_user_highpage() argument
/linux-4.1.27/arch/tile/mm/
H A Delf.c45 struct vm_area_struct *vma; notify_exec() local
64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { notify_exec()
65 if (vma == NULL) { notify_exec()
69 if (vma->vm_file == exe_file) notify_exec()
78 if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) { notify_exec()
82 snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start); notify_exec()
/linux-4.1.27/arch/unicore32/include/asm/
H A Dtlb.h15 #define tlb_start_vma(tlb, vma) do { } while (0)
16 #define tlb_end_vma(tlb, vma) do { } while (0)
/linux-4.1.27/arch/arm64/mm/
H A Dflush.c30 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
33 if (vma->vm_flags & VM_EXEC) flush_cache_range()
37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, flush_ptrace_access() argument
41 if (vma->vm_flags & VM_EXEC) { flush_ptrace_access()
59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
67 flush_ptrace_access(vma, page, uaddr, dst, len); copy_to_user_page()
110 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmdp_splitting_flush() argument
116 set_pmd_at(vma->vm_mm, address, pmdp, pmd); pmdp_splitting_flush()
/linux-4.1.27/arch/microblaze/kernel/
H A Ddma.c157 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, dma_direct_mmap_coherent() argument
162 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; dma_direct_mmap_coherent()
164 unsigned long off = vma->vm_pgoff; dma_direct_mmap_coherent()
171 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); dma_direct_mmap_coherent()
176 return remap_pfn_range(vma, vma->vm_start, pfn + off, dma_direct_mmap_coherent()
177 vma->vm_end - vma->vm_start, vma->vm_page_prot); dma_direct_mmap_coherent()
/linux-4.1.27/arch/arm/mm/
H A Dfault-armv.c40 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, do_adjust_pte() argument
56 flush_cache_page(vma, address, pfn); do_adjust_pte()
61 set_pte_at(vma->vm_mm, address, ptep, entry); do_adjust_pte()
62 flush_tlb_page(vma, address); do_adjust_pte()
92 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, adjust_pte() argument
102 pgd = pgd_offset(vma->vm_mm, address); adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); adjust_pte()
123 ret = do_adjust_pte(vma, address, pfn, pte); adjust_pte()
132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, make_coherent() argument
135 struct mm_struct *mm = vma->vm_mm; make_coherent()
141 pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT); make_coherent()
155 if (mpnt->vm_mm != mm || mpnt == vma) make_coherent()
164 do_adjust_pte(vma, addr, pfn, ptep); make_coherent()
180 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache() argument
203 make_coherent(mapping, vma, addr, ptep, pfn); update_mmu_cache()
204 else if (vma->vm_flags & VM_EXEC) update_mmu_cache()
H A Dmmap.c59 struct vm_area_struct *vma; arch_get_unmapped_area() local
90 vma = find_vma(mm, addr); arch_get_unmapped_area()
92 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
110 struct vm_area_struct *vma; arch_get_unmapped_area_topdown() local
141 vma = find_vma(mm, addr); arch_get_unmapped_area_topdown()
143 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area_topdown()
/linux-4.1.27/drivers/vfio/platform/
H A Dvfio_platform_common.c418 struct vm_area_struct *vma) vfio_platform_mmap_mmio()
422 req_len = vma->vm_end - vma->vm_start; vfio_platform_mmap_mmio()
423 pgoff = vma->vm_pgoff & vfio_platform_mmap_mmio()
430 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vfio_platform_mmap_mmio()
431 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; vfio_platform_mmap_mmio()
433 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vfio_platform_mmap_mmio()
434 req_len, vma->vm_page_prot); vfio_platform_mmap_mmio()
437 static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma) vfio_platform_mmap() argument
442 index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT); vfio_platform_mmap()
444 if (vma->vm_end < vma->vm_start) vfio_platform_mmap()
446 if (!(vma->vm_flags & VM_SHARED)) vfio_platform_mmap()
450 if (vma->vm_start & ~PAGE_MASK) vfio_platform_mmap()
452 if (vma->vm_end & ~PAGE_MASK) vfio_platform_mmap()
459 && (vma->vm_flags & VM_READ)) vfio_platform_mmap()
463 && (vma->vm_flags & VM_WRITE)) vfio_platform_mmap()
466 vma->vm_private_data = vdev; vfio_platform_mmap()
469 return vfio_platform_mmap_mmio(vdev->regions[index], vma); vfio_platform_mmap()
417 vfio_platform_mmap_mmio(struct vfio_platform_region region, struct vm_area_struct *vma) vfio_platform_mmap_mmio() argument
/linux-4.1.27/arch/sh/kernel/vsyscall/
H A Dvsyscall.c88 const char *arch_vma_name(struct vm_area_struct *vma) arch_vma_name() argument
90 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) arch_vma_name()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_gem_prime.c40 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) msm_gem_prime_mmap() argument
45 ret = drm_gem_mmap_obj(obj, obj->size, vma); msm_gem_prime_mmap()
50 return msm_gem_mmap_obj(vma->vm_private_data, vma); msm_gem_prime_mmap()
/linux-4.1.27/arch/arc/mm/
H A Dmmap.c35 struct vm_area_struct *vma; arch_get_unmapped_area() local
65 vma = find_vma(mm, addr); arch_get_unmapped_area()
67 (!vma || addr + len <= vma->vm_start)) arch_get_unmapped_area()
H A Dfault.c58 struct vm_area_struct *vma = NULL; do_page_fault() local
96 vma = find_vma(mm, address); do_page_fault()
97 if (!vma) do_page_fault()
99 if (vma->vm_start <= address) do_page_fault()
101 if (!(vma->vm_flags & VM_GROWSDOWN)) do_page_fault()
103 if (expand_stack(vma, address)) do_page_fault()
120 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
124 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) do_page_fault()
133 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
/linux-4.1.27/include/xen/
H A Dxen-ops.h34 * @vma: VMA to map the pages into
49 int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
57 * @vma: VMA to map the pages into
68 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
73 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
75 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
81 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
/linux-4.1.27/ipc/
shm.c:68 static void shm_open(struct vm_area_struct *vma);
69 static void shm_close(struct vm_area_struct *vma);
190 static int __shm_open(struct vm_area_struct *vma) __shm_open() argument
192 struct file *file = vma->vm_file; __shm_open()
209 static void shm_open(struct vm_area_struct *vma) shm_open() argument
211 int err = __shm_open(vma); shm_open()
264 * remove the attach descriptor vma.
269 static void shm_close(struct vm_area_struct *vma) shm_close() argument
271 struct file *file = vma->vm_file; shm_close()
376 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) shm_fault() argument
378 struct file *file = vma->vm_file; shm_fault()
381 return sfd->vm_ops->fault(vma, vmf); shm_fault()
385 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) shm_set_policy() argument
387 struct file *file = vma->vm_file; shm_set_policy()
391 err = sfd->vm_ops->set_policy(vma, new); shm_set_policy()
395 static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, shm_get_policy() argument
398 struct file *file = vma->vm_file; shm_get_policy()
403 pol = sfd->vm_ops->get_policy(vma, addr); shm_get_policy()
404 else if (vma->vm_policy) shm_get_policy()
405 pol = vma->vm_policy; shm_get_policy()
411 static int shm_mmap(struct file *file, struct vm_area_struct *vma) shm_mmap() argument
420 ret =__shm_open(vma); shm_mmap()
424 ret = sfd->file->f_op->mmap(sfd->file, vma); shm_mmap()
426 shm_close(vma); shm_mmap()
429 sfd->vm_ops = vma->vm_ops; shm_mmap()
433 vma->vm_ops = &shm_vm_ops; shm_mmap()
1262 struct vm_area_struct *vma; SYSCALL_DEFINE1() local
1280 * unmapped: It searches for a vma that is backed by shm and that SYSCALL_DEFINE1()
1288 * a part of a vma. Both calls in this function are for full vmas, SYSCALL_DEFINE1()
1289 * the parameters are directly copied from the vma itself and always SYSCALL_DEFINE1()
1294 * match the usual checks anyway. So assume all vma's are SYSCALL_DEFINE1()
1297 vma = find_vma(mm, addr); SYSCALL_DEFINE1()
1300 while (vma) { SYSCALL_DEFINE1()
1301 next = vma->vm_next; SYSCALL_DEFINE1()
1308 if ((vma->vm_ops == &shm_vm_ops) && SYSCALL_DEFINE1()
1309 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { SYSCALL_DEFINE1()
1317 file = vma->vm_file; SYSCALL_DEFINE1()
1318 size = i_size_read(file_inode(vma->vm_file)); SYSCALL_DEFINE1()
1319 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
1324 * searching for matching vma's. SYSCALL_DEFINE1()
1327 vma = next; SYSCALL_DEFINE1()
1330 vma = next; SYSCALL_DEFINE1()
1339 while (vma && (loff_t)(vma->vm_end - addr) <= size) { SYSCALL_DEFINE1()
1340 next = vma->vm_next; SYSCALL_DEFINE1()
1342 /* finding a matching vma now does not alter retval */ SYSCALL_DEFINE1()
1343 if ((vma->vm_ops == &shm_vm_ops) && SYSCALL_DEFINE1()
1344 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) && SYSCALL_DEFINE1()
1345 (vma->vm_file == file)) SYSCALL_DEFINE1()
1346 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
1347 vma = next; SYSCALL_DEFINE1()
1353 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { SYSCALL_DEFINE1()
1354 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); SYSCALL_DEFINE1()
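
The SYSCALL_DEFINE1(shmdt) scan above exists to find every vma created by one shmat() call and munmap it again. For reference, the user-space sequence that produces (and then removes) those shm_vm_ops vmas is the ordinary System V API; error handling is trimmed and the sizes are arbitrary:

#include <sys/ipc.h>
#include <sys/shm.h>

int demo_sysv_attach(void)
{
	int id = shmget(IPC_PRIVATE, 2 * 4096, IPC_CREAT | 0600);
	void *addr = shmat(id, NULL, 0);	/* kernel builds shm_vm_ops vmas */

	/* ... use the segment ... */

	shmdt(addr);				/* triggers the vma scan above */
	shmctl(id, IPC_RMID, NULL);		/* mark the segment for removal */
	return 0;
}
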
/linux-4.1.27/drivers/xen/xenbus/
xenbus_dev_backend.c:93 static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) xenbus_backend_mmap() argument
95 size_t size = vma->vm_end - vma->vm_start; xenbus_backend_mmap()
100 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) xenbus_backend_mmap()
103 if (remap_pfn_range(vma, vma->vm_start, xenbus_backend_mmap()
105 size, vma->vm_page_prot)) xenbus_backend_mmap()
/linux-4.1.27/drivers/misc/mic/host/
mic_fops.c:192 mic_mmap(struct file *f, struct vm_area_struct *vma) mic_mmap() argument
195 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; mic_mmap()
196 unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; mic_mmap()
203 if (vma->vm_flags & VM_WRITE) mic_mmap()
210 err = remap_pfn_range(vma, vma->vm_start + offset, mic_mmap()
211 pa >> PAGE_SHIFT, size, vma->vm_page_prot); mic_mmap()
215 "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", mic_mmap()
217 pa, vma->vm_start + offset); mic_mmap()
/linux-4.1.27/arch/hexagon/mm/
vm_fault.c:50 struct vm_area_struct *vma; do_page_fault() local
71 vma = find_vma(mm, address); do_page_fault()
72 if (!vma) do_page_fault()
75 if (vma->vm_start <= address) do_page_fault()
78 if (!(vma->vm_flags & VM_GROWSDOWN)) do_page_fault()
81 if (expand_stack(vma, address)) do_page_fault()
90 if (!(vma->vm_flags & VM_EXEC)) do_page_fault()
94 if (!(vma->vm_flags & VM_READ)) do_page_fault()
98 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
104 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
vm_tlb.c:37 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, flush_tlb_range() argument
40 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
76 * Flush TLB state associated with a page of a vma.
78 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr) flush_tlb_page() argument
80 struct mm_struct *mm = vma->vm_mm; flush_tlb_page()
88 * Like flush range, but without the check on the vma->vm_mm.
/linux-4.1.27/arch/m68k/mm/
fault.c:73 struct vm_area_struct * vma; do_page_fault() local
92 vma = find_vma(mm, address); do_page_fault()
93 if (!vma) do_page_fault()
95 if (vma->vm_flags & VM_IO) do_page_fault()
97 if (vma->vm_start <= address) do_page_fault()
99 if (!(vma->vm_flags & VM_GROWSDOWN)) do_page_fault()
109 if (expand_stack(vma, address)) do_page_fault()
122 if (!(vma->vm_flags & VM_WRITE)) do_page_fault()
129 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) do_page_fault()
139 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
/linux-4.1.27/arch/sh/kernel/
sys_sh.c:59 struct vm_area_struct *vma; sys_cacheflush() local
72 vma = find_vma (current->mm, addr); sys_cacheflush()
73 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) { sys_cacheflush()
smp.c:376 struct vm_area_struct *vma; member in struct:flush_tlb_data
385 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); flush_tlb_range_ipi()
388 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
391 struct mm_struct *mm = vma->vm_mm; flush_tlb_range()
397 fd.vma = vma; flush_tlb_range()
407 local_flush_tlb_range(vma, start, end); flush_tlb_range()
431 local_flush_tlb_page(fd->vma, fd->addr1); flush_tlb_page_ipi()
434 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) flush_tlb_page() argument
437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || flush_tlb_page()
438 (current->mm != vma->vm_mm)) { flush_tlb_page()
441 fd.vma = vma; flush_tlb_page()
448 cpu_context(i, vma->vm_mm) = 0; flush_tlb_page()
450 local_flush_tlb_page(vma, page); flush_tlb_page()
/linux-4.1.27/fs/ncpfs/
mmap.c:104 int ncp_mmap(struct file *file, struct vm_area_struct *vma) ncp_mmap() argument
114 if (vma->vm_flags & VM_SHARED) ncp_mmap()
118 if (vma_pages(vma) + vma->vm_pgoff ncp_mmap()
122 vma->vm_ops = &ncp_file_mmap; ncp_mmap()
/linux-4.1.27/arch/s390/include/asm/
hugetlb.h:51 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, huge_ptep_clear_flush() argument
54 huge_ptep_get_and_clear(vma->vm_mm, address, ptep); huge_ptep_clear_flush()
57 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, huge_ptep_set_access_flags() argument
63 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); huge_ptep_set_access_flags()
64 set_huge_pte_at(vma->vm_mm, addr, ptep, pte); huge_ptep_set_access_flags()
/linux-4.1.27/arch/s390/pci/
pci_mmio.c:17 struct vm_area_struct *vma; get_pfn() local
22 vma = find_vma(current->mm, user_addr); get_pfn()
23 if (!vma) get_pfn()
26 if (!(vma->vm_flags & access)) get_pfn()
28 ret = follow_pfn(vma, user_addr, pfn); get_pfn()
/linux-4.1.27/arch/parisc/mm/
fault.c:120 /* This is the treewalk to find a vma which is the highest that has
182 struct vm_area_struct *vma) show_signal_msg()
194 if (vma) show_signal_msg()
196 vma->vm_start, vma->vm_end); show_signal_msg()
204 struct vm_area_struct *vma, *prev_vma; do_page_fault() local
228 vma = find_vma_prev(mm, address, &prev_vma); do_page_fault()
229 if (!vma || address < vma->vm_start) do_page_fault()
238 if ((vma->vm_flags & acc_type) != acc_type) do_page_fault()
247 fault = handle_mm_fault(mm, vma, address, flags); do_page_fault()
287 vma = prev_vma; do_page_fault()
288 if (vma && (expand_stack(vma, address) == 0)) do_page_fault()
300 show_signal_msg(regs, code, address, tsk, vma); do_page_fault()
304 /* send SIGSEGV when outside of vma */ do_page_fault()
305 if (!vma || do_page_fault()
306 address < vma->vm_start || address > vma->vm_end) { do_page_fault()
313 if ((vma->vm_flags & acc_type) != acc_type) { do_page_fault()
180 show_signal_msg(struct pt_regs *regs, unsigned long code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma) show_signal_msg() argument
/linux-4.1.27/drivers/misc/sgi-gru/
grufile.c:71 * and tables belonging to the vma.
73 static void gru_vma_close(struct vm_area_struct *vma) gru_vma_close() argument
79 if (!vma->vm_private_data) gru_vma_close()
82 vdata = vma->vm_private_data; gru_vma_close()
83 vma->vm_private_data = NULL; gru_vma_close()
84 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, gru_vma_close()
103 * Called when mmapping the device. Initializes the vma with a fault handler
107 static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) gru_file_mmap() argument
109 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) gru_file_mmap()
112 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || gru_file_mmap()
113 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) gru_file_mmap()
116 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | gru_file_mmap()
118 vma->vm_page_prot = PAGE_SHARED; gru_file_mmap()
119 vma->vm_ops = &gru_vm_ops; gru_file_mmap()
121 vma->vm_private_data = gru_alloc_vma_data(vma, 0); gru_file_mmap()
122 if (!vma->vm_private_data) gru_file_mmap()
125 gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n", gru_file_mmap()
126 file, vma->vm_start, vma, vma->vm_private_data); gru_file_mmap()
136 struct vm_area_struct *vma; gru_create_new_context() local
152 vma = gru_find_vma(req.gseg); gru_create_new_context()
153 if (vma) { gru_create_new_context()
154 vdata = vma->vm_private_data; gru_create_new_context()
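
gru_file_mmap() above is an example of the deferred-population style of mmap: the handler only stamps vm_flags, vm_ops and vm_private_data, and pages are supplied later from the vm_ops->fault callback. A stripped-down sketch of that shape (all demo_* names are hypothetical; the 4.1-era fault signature taking the vma is assumed):

#include <linux/fs.h>
#include <linux/mm.h>

struct demo_ctx {
	struct page *page;		/* backing page, allocated elsewhere */
};

static int demo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct demo_ctx *ctx = vma->vm_private_data;

	get_page(ctx->page);		/* reference handed to the core VM */
	vmf->page = ctx->page;
	return 0;
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_fault,
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &demo_vm_ops;
	vma->vm_private_data = file->private_data;	/* read back in demo_fault() */
	return 0;
}
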
/linux-4.1.27/arch/nios2/mm/
cacheflush.c:148 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, flush_cache_range() argument
152 if (vma == NULL || (vma->vm_flags & VM_EXEC)) flush_cache_range()
156 void flush_icache_page(struct vm_area_struct *vma, struct page *page) flush_icache_page() argument
165 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, flush_cache_page() argument
172 if (vma->vm_flags & VM_EXEC) flush_cache_page()
216 void update_mmu_cache(struct vm_area_struct *vma, update_mmu_cache() argument
241 if (vma->vm_flags & VM_EXEC) update_mmu_cache()
242 flush_icache_page(vma, page); update_mmu_cache()
265 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, copy_from_user_page() argument
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_from_user_page()
272 if (vma->vm_flags & VM_EXEC) copy_from_user_page()
276 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, copy_to_user_page() argument
280 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); copy_to_user_page()
283 if (vma->vm_flags & VM_EXEC) copy_to_user_page()
/linux-4.1.27/arch/powerpc/oprofile/cell/
vma_map.c:15 * vma-to-fileOffset maps for both overlay and non-overlay SPU
37 vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma, vma_map_lookup() argument
42 * Addresses of dynamically generated code can't be found in the vma vma_map_lookup()
47 u32 offset = 0x10000000 + vma; vma_map_lookup()
51 if (vma < map->vma || vma >= map->vma + map->size) vma_map_lookup()
60 offset = vma - map->vma + map->offset; vma_map_lookup()
68 vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma, vma_map_add() argument
82 new->vma = vma; vma_map_add()
249 /* The ovly.vma/size/offset arguments are analogous to the same create_vma_map()
272 map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset, create_vma_map()
/linux-4.1.27/kernel/events/
uprobes.c:114 * We keep the vma's vm_start rather than a pointer to the vma
116 * the vma go away, and we must handle that reasonably gracefully.
122 * valid_vma: Verify if the specified vma is an executable vma
127 * executable vma.
129 static bool valid_vma(struct vm_area_struct *vma, bool is_register) valid_vma() argument
136 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; valid_vma()
139 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) offset_to_vaddr() argument
141 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); offset_to_vaddr()
144 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) vaddr_to_offset() argument
146 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); vaddr_to_offset()
150 * __replace_page - replace page in vma by new page.
153 * @vma: vma that holds the pte pointing to page
160 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, __replace_page() argument
163 struct mm_struct *mm = vma->vm_mm; __replace_page()
172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); __replace_page()
186 page_add_new_anon_rmap(kpage, vma, addr); __replace_page()
188 lru_cache_add_active_or_unevictable(kpage, vma); __replace_page()
195 flush_cache_page(vma, addr, pte_pfn(*ptep)); __replace_page()
196 ptep_clear_flush_notify(vma, addr, ptep); __replace_page()
197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); __replace_page()
204 if (vma->vm_flags & VM_LOCKED) __replace_page()
304 struct vm_area_struct *vma; uprobe_write_opcode() local
309 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); uprobe_write_opcode()
317 ret = anon_vma_prepare(vma); uprobe_write_opcode()
322 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); uprobe_write_opcode()
330 ret = __replace_page(vma, vaddr, old_page, new_page); uprobe_write_opcode()
648 struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint()
653 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); install_breakpoint()
720 struct vm_area_struct *vma; build_map_info() local
728 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { build_map_info()
729 if (!valid_vma(vma, is_register)) build_map_info()
747 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) build_map_info()
755 info->mm = vma->vm_mm; build_map_info()
756 info->vaddr = offset_to_vaddr(vma, offset); build_map_info()
803 struct vm_area_struct *vma; register_for_each_vma() local
809 vma = find_vma(mm, info->vaddr); register_for_each_vma()
810 if (!vma || !valid_vma(vma, is_register) || register_for_each_vma()
811 file_inode(vma->vm_file) != uprobe->inode) register_for_each_vma()
814 if (vma->vm_start > info->vaddr || register_for_each_vma()
815 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) register_for_each_vma()
822 err = install_breakpoint(uprobe, mm, vma, info->vaddr); register_for_each_vma()
968 struct vm_area_struct *vma; unapply_uprobe() local
972 for (vma = mm->mmap; vma; vma = vma->vm_next) { unapply_uprobe()
976 if (!valid_vma(vma, false) || unapply_uprobe()
977 file_inode(vma->vm_file) != uprobe->inode) unapply_uprobe()
980 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; unapply_uprobe()
982 uprobe->offset >= offset + vma->vm_end - vma->vm_start) unapply_uprobe()
985 vaddr = offset_to_vaddr(vma, uprobe->offset); unapply_uprobe()
1019 * For a given range in vma, build a list of probes that need to be inserted.
1022 struct vm_area_struct *vma, build_probe_list()
1031 min = vaddr_to_offset(vma, start); build_probe_list()
1061 int uprobe_mmap(struct vm_area_struct *vma) uprobe_mmap() argument
1067 if (no_uprobe_events() || !valid_vma(vma, true)) uprobe_mmap()
1070 inode = file_inode(vma->vm_file); uprobe_mmap()
1075 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); uprobe_mmap()
1083 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { uprobe_mmap()
1084 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); uprobe_mmap()
1085 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); uprobe_mmap()
1095 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) vma_has_uprobes() argument
1101 inode = file_inode(vma->vm_file); vma_has_uprobes()
1103 min = vaddr_to_offset(vma, start); vma_has_uprobes()
1114 * Called in context of a munmap of a vma.
1116 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) uprobe_munmap() argument
1118 if (no_uprobe_events() || !valid_vma(vma, false)) uprobe_munmap()
1121 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ uprobe_munmap()
1124 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || uprobe_munmap()
1125 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) uprobe_munmap()
1128 if (vma_has_uprobes(vma, start, end)) uprobe_munmap()
1129 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); uprobe_munmap()
1350 * We probably need flush_icache_user_range() but it needs vma. arch_uprobe_copy_ixol()
1651 struct vm_area_struct *vma; mmf_recalc_uprobes() local
1653 for (vma = mm->mmap; vma; vma = vma->vm_next) { mmf_recalc_uprobes()
1654 if (!valid_vma(vma, false)) mmf_recalc_uprobes()
1662 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) mmf_recalc_uprobes()
1698 struct vm_area_struct *vma; find_active_uprobe() local
1701 vma = find_vma(mm, bp_vaddr); find_active_uprobe()
1702 if (vma && vma->vm_start <= bp_vaddr) { find_active_uprobe()
1703 if (valid_vma(vma, false)) { find_active_uprobe()
1704 struct inode *inode = file_inode(vma->vm_file); find_active_uprobe()
1705 loff_t offset = vaddr_to_offset(vma, bp_vaddr); find_active_uprobe()
1845 * we can simply restart. If this vma was unmapped we handle_swbp()
647 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) install_breakpoint() argument
1021 build_probe_list(struct inode *inode, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *head) build_probe_list() argument
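
offset_to_vaddr() and vaddr_to_offset() above are the two directions of the file-offset/user-address translation the rest of uprobes is built on. Restated with a worked case (the demo_* names are just restatements of the helpers shown above): with vm_start = 0x400000, vm_pgoff = 2 and 4 KiB pages the vma begins at file offset 0x2000, so a probe at file offset 0x2468 lands at user address 0x400468.

#include <linux/mm.h>

static unsigned long demo_offset_to_vaddr(struct vm_area_struct *vma,
					  loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t demo_vaddr_to_offset(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
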
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/bar/
gf100.c:45 struct nvkm_vma *vma) gf100_bar_kmap()
50 ret = nvkm_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma); gf100_bar_kmap()
54 nvkm_vm_map(vma, mem); gf100_bar_kmap()
60 struct nvkm_vma *vma) gf100_bar_umap()
66 mem->page_shift, flags, vma); gf100_bar_umap()
70 nvkm_vm_map(vma, mem); gf100_bar_umap()
75 gf100_bar_unmap(struct nvkm_bar *bar, struct nvkm_vma *vma) gf100_bar_unmap() argument
77 nvkm_vm_unmap(vma); gf100_bar_unmap()
78 nvkm_vm_put(vma); gf100_bar_unmap()
44 gf100_bar_kmap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags, struct nvkm_vma *vma) gf100_bar_kmap() argument
59 gf100_bar_umap(struct nvkm_bar *bar, struct nvkm_mem *mem, u32 flags, struct nvkm_vma *vma) gf100_bar_umap() argument
/linux-4.1.27/arch/hexagon/include/asm/
cacheflush.h:31 * - flush_cache_range(vma, start, end) flushes a range of pages
34 * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
44 #define flush_cache_range(vma, start, end) do { } while (0)
45 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
50 #define flush_icache_page(vma, pg) do { } while (0)
51 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
86 static inline void update_mmu_cache(struct vm_area_struct *vma, update_mmu_cache() argument
92 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
95 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
/linux-4.1.27/sound/soc/pxa/
mmp-pcm.c:123 struct vm_area_struct *vma) mmp_pcm_mmap()
126 unsigned long off = vma->vm_pgoff; mmp_pcm_mmap()
128 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); mmp_pcm_mmap()
129 return remap_pfn_range(vma, vma->vm_start, mmp_pcm_mmap()
131 vma->vm_end - vma->vm_start, vma->vm_page_prot); mmp_pcm_mmap()
122 mmp_pcm_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *vma) mmp_pcm_mmap() argument
/linux-4.1.27/drivers/gpu/drm/vgem/
vgem_drv.c:89 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) vgem_gem_fault() argument
91 struct drm_vgem_gem_object *obj = vma->vm_private_data; vgem_gem_fault()
98 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> vgem_gem_fault()
108 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, vgem_gem_fault()
238 int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) vgem_drm_gem_mmap() argument
250 vma->vm_pgoff, vgem_drm_gem_mmap()
251 vma_pages(vma)); vgem_drm_gem_mmap()
265 ret = dma_buf_mmap(obj->dma_buf, vma, 0); vgem_drm_gem_mmap()
274 vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; vgem_drm_gem_mmap()
275 vma->vm_ops = obj->dev->driver->gem_vm_ops; vgem_drm_gem_mmap()
276 vma->vm_private_data = vgem_obj; vgem_drm_gem_mmap()
277 vma->vm_page_prot = vgem_drm_gem_mmap()
278 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); vgem_drm_gem_mmap()
281 drm_gem_vm_open(vma); vgem_drm_gem_mmap()
/linux-4.1.27/arch/sparc/mm/
fault_32.c:170 struct vm_area_struct *vma; do_sparc_fault() local
210 vma = find_vma(mm, address); do_sparc_fault()
211 if (!vma) do_sparc_fault()
213 if (vma->vm_start <= address) do_sparc_fault()
215 if (!(vma->vm_flags & VM_GROWSDOWN)) do_sparc_fault()
217 if (expand_stack(vma, address)) do_sparc_fault()
226 if (!(vma->vm_flags & VM_WRITE)) do_sparc_fault()
230 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) do_sparc_fault()
244 fault = handle_mm_fault(mm, vma, address, flags); do_sparc_fault()
386 struct vm_area_struct *vma; force_user_fault() local
395 vma = find_vma(mm, address); force_user_fault()
396 if (!vma) force_user_fault()
398 if (vma->vm_start <= address) force_user_fault()
400 if (!(vma->vm_flags & VM_GROWSDOWN)) force_user_fault()
402 if (expand_stack(vma, address)) force_user_fault()
407 if (!(vma->vm_flags & VM_WRITE)) force_user_fault()
411 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) force_user_fault()
414 switch (handle_mm_fault(mm, vma, address, flags)) { force_user_fault()
/linux-4.1.27/drivers/uio/
uio.c:579 static int uio_find_mem_index(struct vm_area_struct *vma) uio_find_mem_index() argument
581 struct uio_device *idev = vma->vm_private_data; uio_find_mem_index()
583 if (vma->vm_pgoff < MAX_UIO_MAPS) { uio_find_mem_index()
584 if (idev->info->mem[vma->vm_pgoff].size == 0) uio_find_mem_index()
586 return (int)vma->vm_pgoff; uio_find_mem_index()
591 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) uio_vma_fault() argument
593 struct uio_device *idev = vma->vm_private_data; uio_vma_fault()
598 int mi = uio_find_mem_index(vma); uio_vma_fault()
622 static int uio_mmap_logical(struct vm_area_struct *vma) uio_mmap_logical() argument
624 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; uio_mmap_logical()
625 vma->vm_ops = &uio_logical_vm_ops; uio_mmap_logical()
635 static int uio_mmap_physical(struct vm_area_struct *vma) uio_mmap_physical() argument
637 struct uio_device *idev = vma->vm_private_data; uio_mmap_physical()
638 int mi = uio_find_mem_index(vma); uio_mmap_physical()
646 if (vma->vm_end - vma->vm_start > mem->size) uio_mmap_physical()
649 vma->vm_ops = &uio_physical_vm_ops; uio_mmap_physical()
650 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); uio_mmap_physical()
654 * because vma->vm_pgoff is the map index we looked uio_mmap_physical()
661 return remap_pfn_range(vma, uio_mmap_physical()
662 vma->vm_start, uio_mmap_physical()
664 vma->vm_end - vma->vm_start, uio_mmap_physical()
665 vma->vm_page_prot); uio_mmap_physical()
668 static int uio_mmap(struct file *filep, struct vm_area_struct *vma) uio_mmap() argument
676 if (vma->vm_end < vma->vm_start) uio_mmap()
679 vma->vm_private_data = idev; uio_mmap()
681 mi = uio_find_mem_index(vma); uio_mmap()
685 requested_pages = vma_pages(vma); uio_mmap()
692 ret = idev->info->mmap(idev->info, vma); uio_mmap()
698 return uio_mmap_physical(vma); uio_mmap()
701 return uio_mmap_logical(vma); uio_mmap()
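
uio_mmap() above decodes vma->vm_pgoff as a memory-map index rather than a byte offset, which is why user space selects UIO mapping N by passing N times the page size as the mmap() offset. A minimal user-space sketch of that convention; the device path, helper name and length are assumptions:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

void *demo_map_uio_region(const char *path, int map_index, size_t len)
{
	int fd = open(path, O_RDWR);	/* e.g. "/dev/uio0" */
	void *p;

	if (fd < 0)
		return NULL;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, (off_t)map_index * getpagesize());
	close(fd);			/* the mapping stays valid after close */
	return p == MAP_FAILED ? NULL : p;
}
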
/linux-4.1.27/arch/xtensa/kernel/
smp.c:442 struct vm_area_struct *vma; member in struct:flush_data
470 local_flush_tlb_page(fd->vma, fd->addr1); ipi_flush_tlb_page()
473 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) flush_tlb_page() argument
476 .vma = vma, flush_tlb_page()
485 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); ipi_flush_tlb_range()
488 void flush_tlb_range(struct vm_area_struct *vma, flush_tlb_range() argument
492 .vma = vma, flush_tlb_range()
529 local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); ipi_flush_cache_page()
532 void flush_cache_page(struct vm_area_struct *vma, flush_cache_page() argument
536 .vma = vma, flush_cache_page()
546 local_flush_cache_range(fd->vma, fd->addr1, fd->addr2); ipi_flush_cache_range()
549 void flush_cache_range(struct vm_area_struct *vma, flush_cache_range() argument
553 .vma = vma, flush_cache_range()
/linux-4.1.27/arch/x86/mm/
hugetlbpage.c:27 struct vm_area_struct *vma;
29 vma = find_vma(mm, addr);
30 if (!vma || !is_vm_hugetlb_page(vma))
130 struct vm_area_struct *vma; hugetlb_get_unmapped_area() local
145 vma = find_vma(mm, addr); hugetlb_get_unmapped_area()
147 (!vma || addr + len <= vma->vm_start)) hugetlb_get_unmapped_area()
/linux-4.1.27/arch/score/mm/
tlb-score.c:80 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_flush_tlb_range() argument
83 struct mm_struct *mm = vma->vm_mm; local_flush_tlb_range()
159 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) local_flush_tlb_page() argument
161 if (vma && vma->vm_mm->context != 0) { local_flush_tlb_page()
164 unsigned long vma_ASID = vma->vm_mm->context; local_flush_tlb_page()
213 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) __update_tlb() argument
221 if (current->active_mm != vma->vm_mm) __update_tlb()
/linux-4.1.27/arch/ia64/pci/
pci.c:590 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, pci_mmap_page_range() argument
593 unsigned long size = vma->vm_end - vma->vm_start; pci_mmap_page_range()
609 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) pci_mmap_page_range()
612 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, pci_mmap_page_range()
613 vma->vm_page_prot); pci_mmap_page_range()
623 efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) pci_mmap_page_range()
624 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); pci_mmap_page_range()
626 vma->vm_page_prot = prot; pci_mmap_page_range()
628 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_page_range()
629 vma->vm_end - vma->vm_start, vma->vm_page_prot)) pci_mmap_page_range()
655 * @vma: vma passed in by mmap
661 pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, pci_mmap_legacy_page_range() argument
664 unsigned long size = vma->vm_end - vma->vm_start; pci_mmap_legacy_page_range()
676 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) pci_mmap_legacy_page_range()
678 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, pci_mmap_legacy_page_range()
679 vma->vm_page_prot); pci_mmap_legacy_page_range()
685 vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; pci_mmap_legacy_page_range()
686 vma->vm_page_prot = prot; pci_mmap_legacy_page_range()
688 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, pci_mmap_legacy_page_range()
689 size, vma->vm_page_prot)) pci_mmap_legacy_page_range()
