
Searched refs:vma (Results 1 – 200 of 848) sorted by relevance


/linux-4.1.27/drivers/gpu/drm/
Ddrm_vm.c49 struct vm_area_struct *vma; member
53 static void drm_vm_open(struct vm_area_struct *vma);
54 static void drm_vm_close(struct vm_area_struct *vma);
57 struct vm_area_struct *vma) in drm_io_prot() argument
59 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot()
67 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot()
68 vma->vm_start)) in drm_io_prot()
78 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument
80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot()
99 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in drm_do_vm_fault() argument
[all …]
Ddrm_gem_cma_helper.c318 struct vm_area_struct *vma) in drm_gem_cma_mmap_obj() argument
327 vma->vm_flags &= ~VM_PFNMAP; in drm_gem_cma_mmap_obj()
328 vma->vm_pgoff = 0; in drm_gem_cma_mmap_obj()
330 ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma, in drm_gem_cma_mmap_obj()
332 vma->vm_end - vma->vm_start); in drm_gem_cma_mmap_obj()
334 drm_gem_vm_close(vma); in drm_gem_cma_mmap_obj()
354 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) in drm_gem_cma_mmap() argument
360 ret = drm_gem_mmap(filp, vma); in drm_gem_cma_mmap()
364 gem_obj = vma->vm_private_data; in drm_gem_cma_mmap()
367 return drm_gem_cma_mmap_obj(cma_obj, vma); in drm_gem_cma_mmap()
[all …]
Ddrm_gem.c776 void drm_gem_vm_open(struct vm_area_struct *vma) in drm_gem_vm_open() argument
778 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_vm_open()
783 drm_vm_open_locked(obj->dev, vma); in drm_gem_vm_open()
788 void drm_gem_vm_close(struct vm_area_struct *vma) in drm_gem_vm_close() argument
790 struct drm_gem_object *obj = vma->vm_private_data; in drm_gem_vm_close()
794 drm_vm_close_locked(obj->dev, vma); in drm_gem_vm_close()
827 struct vm_area_struct *vma) in drm_gem_mmap_obj() argument
834 if (obj_size < vma->vm_end - vma->vm_start) in drm_gem_mmap_obj()
840 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in drm_gem_mmap_obj()
841 vma->vm_ops = dev->driver->gem_vm_ops; in drm_gem_mmap_obj()
[all …]
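The drm_gem.c hits above show the usual shape of a driver mmap hook: size-check the request against the backing object, mark the VMA as VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP, install the driver's vm_operations_struct, and stash the object in vm_private_data. A minimal sketch of that pattern against the 4.1-era API — the object type, helper name, and empty vm_ops are illustrative, not taken from the kernel source:

```c
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver object backing the mapping. */
struct my_obj {
	size_t size;
};

/* Would carry .fault, .open and .close in a real driver. */
static const struct vm_operations_struct my_vm_ops;

/* Sketch in the style of drm_gem_mmap_obj() shown above. */
static int my_mmap_obj(struct my_obj *obj, struct vm_area_struct *vma)
{
	/* Refuse mappings larger than the backing object. */
	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Pages are inserted by the fault handler, never by the core mm. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &my_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}
```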
/linux-4.1.27/mm/
Dmmap.c61 struct vm_area_struct *vma, struct vm_area_struct *prev,
98 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
100 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
102 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
103 if (vma_wants_writenotify(vma)) { in vma_set_page_prot()
105 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, in vma_set_page_prot()
237 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
240 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
242 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
246 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
[all …]
Dmremap.c52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
88 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
95 struct mm_struct *mm = vma->vm_mm; in move_ptes()
118 if (vma->vm_file) { in move_ptes()
119 mapping = vma->vm_file->f_mapping; in move_ptes()
122 if (vma->anon_vma) { in move_ptes()
123 anon_vma = vma->anon_vma; in move_ptes()
162 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
174 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
178 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
[all …]
Dnommu.c137 struct vm_area_struct *vma; in kobjsize() local
139 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
140 if (vma) in kobjsize()
141 return vma->vm_end - vma->vm_start; in kobjsize()
156 struct vm_area_struct *vma; in __get_user_pages() local
169 vma = find_vma(mm, start); in __get_user_pages()
170 if (!vma) in __get_user_pages()
174 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
175 !(vm_flags & vma->vm_flags)) in __get_user_pages()
184 vmas[i] = vma; in __get_user_pages()
[all …]
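The nommu.c kobjsize() and __get_user_pages() hits illustrate the basic find_vma() lookup: it returns the first VMA whose vm_end lies above the address, so callers must still compare against vm_start before trusting the result. A small sketch of that lookup under mmap_sem (the function name is made up; locking and field names follow the 4.1-era API):

```c
#include <linux/mm.h>
#include <linux/sched.h>

/* Return the size of the mapping containing @addr in @mm, or 0 if unmapped. */
static unsigned long mapping_size_of(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long size = 0;

	down_read(&mm->mmap_sem);          /* find_vma() requires mmap_sem held */
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)  /* find_vma() may return a VMA above addr */
		size = vma->vm_end - vma->vm_start;
	up_read(&mm->mmap_sem);

	return size;
}
```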
Dmadvise.c45 static long madvise_behavior(struct vm_area_struct *vma, in madvise_behavior() argument
49 struct mm_struct *mm = vma->vm_mm; in madvise_behavior()
52 unsigned long new_flags = vma->vm_flags; in madvise_behavior()
68 if (vma->vm_flags & VM_IO) { in madvise_behavior()
86 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
92 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior()
98 if (new_flags == vma->vm_flags) { in madvise_behavior()
99 *prev = vma; in madvise_behavior()
103 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
104 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
[all …]
Dmprotect.c39 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, in lock_pte_protection() argument
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
49 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
60 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
64 struct mm_struct *mm = vma->vm_mm; in change_pte_range()
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range()
87 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
104 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
134 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument
[all …]
Dmemory.c528 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
531 while (vma) { in free_pgtables()
532 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
533 unsigned long addr = vma->vm_start; in free_pgtables()
539 unlink_anon_vmas(vma); in free_pgtables()
540 unlink_file_vma(vma); in free_pgtables()
542 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
543 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
549 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
551 vma = next; in free_pgtables()
[all …]
Drmap.c127 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument
131 avc->vma = vma; in anon_vma_chain_link()
133 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
164 int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
166 struct anon_vma *anon_vma = vma->anon_vma; in anon_vma_prepare()
171 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
178 anon_vma = find_mergeable_anon_vma(vma); in anon_vma_prepare()
190 if (likely(!vma->anon_vma)) { in anon_vma_prepare()
191 vma->anon_vma = anon_vma; in anon_vma_prepare()
192 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_prepare()
[all …]
Dpagewalk.c38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range()
135 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range() local
136 struct hstate *h = hstate_vma(vma); in walk_hugetlb_range()
172 struct vm_area_struct *vma = walk->vma; in walk_page_test() local
185 if (vma->vm_flags & VM_PFNMAP) { in walk_page_test()
198 struct vm_area_struct *vma = walk->vma; in __walk_page_range() local
200 if (vma && is_vm_hugetlb_page(vma)) { in __walk_page_range()
244 struct vm_area_struct *vma; in walk_page_range() local
254 vma = find_vma(walk->mm, start); in walk_page_range()
256 if (!vma) { /* after the last vma */ in walk_page_range()
[all …]
Dpgtable-generic.c47 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument
53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
54 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags()
61 int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument
69 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
70 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); in pmdp_set_access_flags()
81 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument
85 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
87 flush_tlb_page(vma, address); in ptep_clear_flush_young()
93 int pmdp_clear_flush_young(struct vm_area_struct *vma, in pmdp_clear_flush_young() argument
[all …]
Dmlock.c360 struct vm_area_struct *vma, int zoneid, unsigned long start, in __munlock_pagevec_fill() argument
371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
383 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
422 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument
425 vma->vm_flags &= ~VM_LOCKED; in munlock_vma_pages_range()
443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, in munlock_vma_pages_range()
474 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range()
498 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument
501 struct mm_struct *mm = vma->vm_mm; in mlock_fixup()
507 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup()
[all …]
Dgup.c19 static struct page *no_page_table(struct vm_area_struct *vma, in no_page_table() argument
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
35 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
45 return no_page_table(vma, flags); in follow_page_pte()
74 page = vm_normal_page(vma, address, pte); in follow_page_pte()
95 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
127 return no_page_table(vma, flags); in follow_page_pte()
143 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
[all …]
Dmsync.c35 struct vm_area_struct *vma; in SYSCALL_DEFINE3() local
58 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
65 if (!vma) in SYSCALL_DEFINE3()
68 if (start < vma->vm_start) { in SYSCALL_DEFINE3()
69 start = vma->vm_start; in SYSCALL_DEFINE3()
76 (vma->vm_flags & VM_LOCKED)) { in SYSCALL_DEFINE3()
80 file = vma->vm_file; in SYSCALL_DEFINE3()
81 fstart = (start - vma->vm_start) + in SYSCALL_DEFINE3()
82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in SYSCALL_DEFINE3()
83 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
[all …]
Dhuge_memory.c702 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
704 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
718 struct vm_area_struct *vma, in __do_huge_pmd_anonymous_page() argument
753 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
754 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
755 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
757 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
775 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
781 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
789 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
[all …]
Dhugetlb.c211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument
213 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
401 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
403 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
404 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
407 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
410 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
417 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
421 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
424 hstate = hstate_vma(vma); in vma_kernel_pagesize()
[all …]
Dmempolicy.c445 struct vm_area_struct *vma; in mpol_rebind_mm() local
448 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm()
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); in mpol_rebind_mm()
488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() local
496 split_huge_page_pmd(vma, addr, pmd); in queue_pages_pte_range()
504 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
567 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
572 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
579 static unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
[all …]
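mpol_rebind_mm() above walks every VMA of the mm through the singly linked vm_next list while holding mmap_sem; the same walk appears later in the listing in parisc's flush_cache_mm(). A minimal sketch of that iteration — the locked-page counting is an illustrative payload, not something from these hits:

```c
#include <linux/mm.h>

/* Count the mlock()ed VMAs in @mm.  The caller must hold mm->mmap_sem,
 * as mpol_rebind_mm() in the hits above does (it takes the write side).
 */
static int count_locked_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_flags & VM_LOCKED)
			n++;

	return n;
}
```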
Dvmacache.c96 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find() local
98 if (!vma) in vmacache_find()
100 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find()
102 if (vma->vm_start <= addr && vma->vm_end > addr) { in vmacache_find()
104 return vma; in vmacache_find()
124 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find_exact() local
126 if (vma && vma->vm_start == start && vma->vm_end == end) { in vmacache_find_exact()
128 return vma; in vmacache_find_exact()
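vmacache_find() above consults current->vmacache, a small per-task array of recently used VMAs, before callers fall back to the rbtree walk; the hit test is the usual half-open containment check. A tiny sketch of that check (the helper name is not from the kernel):

```c
#include <linux/mm_types.h>

/* True when @addr lies inside @vma: VMA ranges are half-open, [vm_start, vm_end). */
static inline bool vma_contains(const struct vm_area_struct *vma, unsigned long addr)
{
	return vma->vm_start <= addr && addr < vma->vm_end;
}
```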
Dksm.c363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) in break_ksm() argument
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
414 struct vm_area_struct *vma; in find_mergeable_vma() local
417 vma = find_vma(mm, addr); in find_mergeable_vma()
418 if (!vma || vma->vm_start > addr) in find_mergeable_vma()
420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
422 return vma; in find_mergeable_vma()
429 struct vm_area_struct *vma; in break_cow() local
438 vma = find_mergeable_vma(mm, addr); in break_cow()
[all …]
Dmincore.c85 struct vm_area_struct *vma, unsigned char *vec) in __mincore_unmapped_range() argument
90 if (vma->vm_file) { in __mincore_unmapped_range()
93 pgoff = linear_page_index(vma, addr); in __mincore_unmapped_range()
95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); in __mincore_unmapped_range()
107 walk->vma, walk->private); in mincore_unmapped_range()
115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mincore_pte_range()
127 __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range()
137 vma, vec); in mincore_pte_range()
175 struct vm_area_struct *vma; in do_mincore() local
[all …]
Ddebug.c154 void dump_vma(const struct vm_area_struct *vma) in dump_vma() argument
160 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, in dump_vma()
161 vma->vm_prev, vma->vm_mm, in dump_vma()
162 (unsigned long)pgprot_val(vma->vm_page_prot), in dump_vma()
163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, in dump_vma()
164 vma->vm_file, vma->vm_private_data); in dump_vma()
165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); in dump_vma()
/linux-4.1.27/drivers/media/v4l2-core/
Dvideobuf2-memops.c36 struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma) in vb2_get_vma() argument
44 if (vma->vm_ops && vma->vm_ops->open) in vb2_get_vma()
45 vma->vm_ops->open(vma); in vb2_get_vma()
47 if (vma->vm_file) in vb2_get_vma()
48 get_file(vma->vm_file); in vb2_get_vma()
50 memcpy(vma_copy, vma, sizeof(*vma)); in vb2_get_vma()
67 void vb2_put_vma(struct vm_area_struct *vma) in vb2_put_vma() argument
69 if (!vma) in vb2_put_vma()
72 if (vma->vm_ops && vma->vm_ops->close) in vb2_put_vma()
73 vma->vm_ops->close(vma); in vb2_put_vma()
[all …]
Dvideobuf-dma-contig.c66 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument
68 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open()
71 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open()
76 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument
78 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close()
83 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close()
164 struct vm_area_struct *vma; in videobuf_dma_contig_user_get() local
176 vma = find_vma(mm, vb->baddr); in videobuf_dma_contig_user_get()
177 if (!vma) in videobuf_dma_contig_user_get()
180 if ((vb->baddr + mem->size) > vma->vm_end) in videobuf_dma_contig_user_get()
[all …]
Dvideobuf-vmalloc.c54 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument
56 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open()
59 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open()
64 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument
66 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close()
71 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close()
212 rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); in __videobuf_iolock()
234 struct vm_area_struct *vma) in __videobuf_mmap_mapper() argument
250 buf->baddr = vma->vm_start; in __videobuf_mmap_mapper()
256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); in __videobuf_mmap_mapper()
[all …]
Dvideobuf2-dma-contig.c42 struct vm_area_struct *vma; member
191 static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) in vb2_dc_mmap() argument
205 vma->vm_pgoff = 0; in vb2_dc_mmap()
207 ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr, in vb2_dc_mmap()
215 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in vb2_dc_mmap()
216 vma->vm_private_data = &buf->handler; in vb2_dc_mmap()
217 vma->vm_ops = &vb2_common_vm_ops; in vb2_dc_mmap()
219 vma->vm_ops->open(vma); in vb2_dc_mmap()
222 __func__, (unsigned long)buf->dma_addr, vma->vm_start, in vb2_dc_mmap()
362 struct vm_area_struct *vma) in vb2_dc_dmabuf_ops_mmap() argument
[all …]
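vb2_dc_mmap() above is the videobuf2 flavor of the DMA-coherent mmap pattern: reset vm_pgoff, hand the VMA to dma_mmap_coherent(), then mark it VM_DONTEXPAND | VM_DONTDUMP and wire up vm_private_data / vm_ops so refcounting follows the mapping. A hedged sketch of that pattern — the buffer struct and helper name are illustrative:

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical coherent buffer, allocated earlier with dma_alloc_coherent(). */
struct my_dma_buf {
	struct device	*dev;
	void		*vaddr;
	dma_addr_t	dma_addr;
	size_t		size;
};

static int my_dma_mmap(struct my_dma_buf *buf, struct vm_area_struct *vma)
{
	int ret;

	/* Map from the buffer start; offsets inside the buffer are not supported here. */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr, buf->dma_addr,
				vma->vm_end - vma->vm_start);
	if (ret)
		return ret;

	/* The mapping must not grow and should stay out of core dumps. */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = buf;

	return 0;
}
```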
Dvideobuf2-vmalloc.c27 struct vm_area_struct *vma; member
81 struct vm_area_struct *vma; in vb2_vmalloc_get_userptr() local
93 vma = find_vma(current->mm, vaddr); in vb2_vmalloc_get_userptr()
94 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) { in vb2_vmalloc_get_userptr()
95 if (vb2_get_contig_userptr(vaddr, size, &vma, &physp)) in vb2_vmalloc_get_userptr()
97 buf->vma = vma; in vb2_vmalloc_get_userptr()
157 vb2_put_vma(buf->vma); in vb2_vmalloc_put_userptr()
182 static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) in vb2_vmalloc_mmap() argument
192 ret = remap_vmalloc_range(vma, buf->vaddr, 0); in vb2_vmalloc_mmap()
201 vma->vm_flags |= VM_DONTEXPAND; in vb2_vmalloc_mmap()
[all …]
Dvideobuf2-dma-sg.c54 struct vm_area_struct *vma; member
227 static inline int vma_is_io(struct vm_area_struct *vma) in vma_is_io() argument
229 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); in vma_is_io()
240 struct vm_area_struct *vma; in vb2_dma_sg_get_userptr() local
266 vma = find_vma(current->mm, vaddr); in vb2_dma_sg_get_userptr()
267 if (!vma) { in vb2_dma_sg_get_userptr()
272 if (vma->vm_end < vaddr + size) { in vb2_dma_sg_get_userptr()
278 buf->vma = vb2_get_vma(vma); in vb2_dma_sg_get_userptr()
279 if (!buf->vma) { in vb2_dma_sg_get_userptr()
284 if (vma_is_io(buf->vma)) { in vb2_dma_sg_get_userptr()
[all …]
/linux-4.1.27/drivers/infiniband/hw/ehca/
Dehca_uverbs.c71 static void ehca_mm_open(struct vm_area_struct *vma) in ehca_mm_open() argument
73 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_open()
76 vma->vm_start, vma->vm_end); in ehca_mm_open()
82 vma->vm_start, vma->vm_end); in ehca_mm_open()
84 vma->vm_start, vma->vm_end, *count); in ehca_mm_open()
87 static void ehca_mm_close(struct vm_area_struct *vma) in ehca_mm_close() argument
89 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_close()
92 vma->vm_start, vma->vm_end); in ehca_mm_close()
97 vma->vm_start, vma->vm_end, *count); in ehca_mm_close()
105 static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, in ehca_mmap_fw() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/i915/
Di915_gem_evict.c37 mark_free(struct i915_vma *vma, struct list_head *unwind) in mark_free() argument
39 if (vma->pin_count) in mark_free()
42 if (WARN_ON(!list_empty(&vma->exec_list))) in mark_free()
45 list_add(&vma->exec_list, unwind); in mark_free()
46 return drm_mm_scan_add_block(&vma->node); in mark_free()
80 struct i915_vma *vma; in i915_gem_evict_something() local
119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { in i915_gem_evict_something()
120 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something()
128 list_for_each_entry(vma, &vm->active_list, mm_list) { in i915_gem_evict_something()
129 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something()
[all …]
Di915_gem_execbuffer.c132 struct i915_vma *vma; in eb_lookup_vmas() local
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); in eb_lookup_vmas()
147 if (IS_ERR(vma)) { in eb_lookup_vmas()
149 ret = PTR_ERR(vma); in eb_lookup_vmas()
154 list_add_tail(&vma->exec_list, &eb->vmas); in eb_lookup_vmas()
157 vma->exec_entry = &exec[i]; in eb_lookup_vmas()
159 eb->lut[i] = vma; in eb_lookup_vmas()
162 vma->exec_handle = handle; in eb_lookup_vmas()
163 hlist_add_head(&vma->exec_node, in eb_lookup_vmas()
200 struct i915_vma *vma; in eb_get_vma() local
[all …]
Di915_gem_gtt.c149 static void ppgtt_bind_vma(struct i915_vma *vma,
152 static void ppgtt_unbind_vma(struct i915_vma *vma);
1539 ppgtt_bind_vma(struct i915_vma *vma, in ppgtt_bind_vma() argument
1544 if (vma->obj->gt_ro) in ppgtt_bind_vma()
1547 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, in ppgtt_bind_vma()
1551 static void ppgtt_unbind_vma(struct i915_vma *vma) in ppgtt_unbind_vma() argument
1553 vma->vm->clear_range(vma->vm, in ppgtt_unbind_vma()
1554 vma->node.start, in ppgtt_unbind_vma()
1555 vma->obj->base.size, in ppgtt_unbind_vma()
1671 struct i915_vma *vma = i915_gem_obj_to_vma(obj, in i915_gem_restore_gtt_mappings() local
[all …]
Di915_gem.c287 struct i915_vma *vma, *next; in drop_pages() local
291 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) in drop_pages()
292 if (i915_vma_unbind(vma)) in drop_pages()
1578 struct vm_area_struct *vma; in i915_gem_mmap_ioctl() local
1581 vma = find_vma(mm, addr); in i915_gem_mmap_ioctl()
1582 if (vma) in i915_gem_mmap_ioctl()
1583 vma->vm_page_prot = in i915_gem_mmap_ioctl()
1584 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in i915_gem_mmap_ioctl()
1614 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in i915_gem_fault() argument
1616 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); in i915_gem_fault()
[all …]
/linux-4.1.27/drivers/xen/
Dprivcmd.c47 struct vm_area_struct *vma,
198 struct vm_area_struct *vma; member
206 struct vm_area_struct *vma = st->vma; in mmap_mfn_range() local
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_mfn_range()
219 rc = xen_remap_domain_mfn_range(vma, in mmap_mfn_range()
222 vma->vm_page_prot, in mmap_mfn_range()
236 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local
262 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap()
265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap()
267 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap()
[all …]
Dgntdev.c84 struct vm_area_struct *vma; member
241 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; in find_grant_ptes()
397 static void gntdev_vma_open(struct vm_area_struct *vma) in gntdev_vma_open() argument
399 struct grant_map *map = vma->vm_private_data; in gntdev_vma_open()
401 pr_debug("gntdev_vma_open %p\n", vma); in gntdev_vma_open()
405 static void gntdev_vma_close(struct vm_area_struct *vma) in gntdev_vma_close() argument
407 struct grant_map *map = vma->vm_private_data; in gntdev_vma_close()
408 struct file *file = vma->vm_file; in gntdev_vma_close()
411 pr_debug("gntdev_vma_close %p\n", vma); in gntdev_vma_close()
421 map->vma = NULL; in gntdev_vma_close()
[all …]
Dxlate_mmu.c68 struct vm_area_struct *vma; member
88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn()
96 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, in xen_xlate_remap_gfn_array() argument
109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); in xen_xlate_remap_gfn_array()
114 data.vma = vma; in xen_xlate_remap_gfn_array()
120 err = apply_to_page_range(vma->vm_mm, addr, range, in xen_xlate_remap_gfn_array()
126 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, in xen_xlate_unmap_gfn_range() argument
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Dllite_mmap.c57 struct vm_area_struct *vma, unsigned long addr, in policy_from_vma() argument
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + in policy_from_vma()
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); in policy_from_vma()
69 struct vm_area_struct *vma, *ret = NULL; in our_vma() local
74 for (vma = find_vma(mm, addr); in our_vma()
75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { in our_vma()
76 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && in our_vma()
77 vma->vm_flags & VM_SHARED) { in our_vma()
78 ret = vma; in our_vma()
98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, in ll_fault_io_init() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/ttm/
Dttm_bo_vm.c45 struct vm_area_struct *vma, in ttm_bo_vm_fault_idle() argument
69 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle()
86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in ttm_bo_vm_fault() argument
89 vma->vm_private_data; in ttm_bo_vm_fault()
117 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault()
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); in ttm_bo_vm_fault()
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault()
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); in ttm_bo_vm_fault()
179 page_last = vma_pages(vma) + vma->vm_pgoff - in ttm_bo_vm_fault()
192 cvma = *vma; in ttm_bo_vm_fault()
[all …]
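ttm_bo_vm_fault() above (and mspec_fault() further down in drivers/char/mspec.c) shows the .fault side of a VM_PFNMAP mapping: translate the faulting address into an offset within the object, then insert the matching PFN. A sketch of a 4.1-era fault handler in that style — the device struct is illustrative, and vmf->virtual_address is the field this kernel version exposes:

```c
#include <linux/mm.h>

/* Hypothetical device state: a physically contiguous region of @npages pages. */
struct my_dev {
	unsigned long	base_pfn;
	unsigned long	npages;
};

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long offset;
	int ret;

	/* Page offset of the fault inside the object, as ttm_bo_vm_fault() computes it. */
	offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (offset >= dev->npages)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, dev->base_pfn + offset);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	if (ret && ret != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;	/* the PTE is already installed */
}
```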
/linux-4.1.27/include/linux/
Dhuge_mm.h5 struct vm_area_struct *vma,
10 struct vm_area_struct *vma);
12 struct vm_area_struct *vma,
15 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
23 struct vm_area_struct *vma,
25 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
28 extern int move_huge_pmd(struct vm_area_struct *vma,
33 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
68 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
[all …]
Dmm.h247 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
248 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
252 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
255 int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
260 int (*access)(struct vm_area_struct *vma, unsigned long addr,
266 const char *(*name)(struct vm_area_struct *vma);
276 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
288 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
296 struct page *(*find_special_page)(struct vm_area_struct *vma,
610 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite() argument
[all …]
Dmempolicy.h93 #define vma_policy(vma) ((vma)->vm_policy) argument
131 struct vm_area_struct *vma,
138 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
140 bool vma_policy_mof(struct vm_area_struct *vma);
148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
175 static inline int vma_migratable(struct vm_area_struct *vma) in vma_migratable() argument
177 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in vma_migratable()
181 if (vma->vm_flags & VM_HUGETLB) in vma_migratable()
190 if (vma->vm_file && in vma_migratable()
191 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) in vma_migratable()
[all …]
Dkhugepaged.h9 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
39 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument
42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter()
46 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter()
58 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument
63 static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
Drmap.h74 struct vm_area_struct *vma; member
138 static inline void anon_vma_merge(struct vm_area_struct *vma, in anon_vma_merge() argument
141 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); in anon_vma_merge()
219 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
232 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
236 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
244 #define anon_vma_prepare(vma) (0) argument
245 #define anon_vma_link(vma) do {} while (0) argument
Dhugetlb.h51 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
68 struct vm_area_struct *vma,
71 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
78 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81 struct vm_area_struct *vma,
111 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
116 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
127 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) argument
142 #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) argument
155 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
[all …]
/linux-4.1.27/fs/proc/
Dtask_mmu.c127 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) in m_next_vma() argument
129 if (vma == priv->tail_vma) in m_next_vma()
131 return vma->vm_next ?: priv->tail_vma; in m_next_vma()
134 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) in m_cache_vma() argument
137 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; in m_cache_vma()
145 struct vm_area_struct *vma; in m_start() local
165 vma = find_vma(mm, last_addr); in m_start()
166 if (vma && (vma = m_next_vma(priv, vma))) in m_start()
167 return vma; in m_start()
172 for (vma = mm->mmap; pos; pos--) { in m_start()
[all …]
Dtask_nommu.c20 struct vm_area_struct *vma; in task_mem() local
27 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_mem()
29 bytes += kobjsize(vma); in task_mem()
31 region = vma->vm_region; in task_mem()
36 size = vma->vm_end - vma->vm_start; in task_mem()
40 vma->vm_flags & VM_MAYSHARE) { in task_mem()
45 slack = region->vm_end - vma->vm_end; in task_mem()
82 struct vm_area_struct *vma; in task_vsize() local
88 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_vsize()
89 vsize += vma->vm_end - vma->vm_start; in task_vsize()
[all …]
Dvmcore.c160 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, in remap_oldmem_pfn_range() argument
164 return remap_pfn_range(vma, from, pfn, size, prot); in remap_oldmem_pfn_range()
266 static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in mmap_vmcore_fault() argument
269 struct address_space *mapping = vma->vm_file->f_mapping; in mmap_vmcore_fault()
343 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma, in remap_oldmem_pfn_checked() argument
365 if (remap_oldmem_pfn_range(vma, from + len, in remap_oldmem_pfn_checked()
372 if (remap_oldmem_pfn_range(vma, from + len, in remap_oldmem_pfn_checked()
383 if (remap_oldmem_pfn_range(vma, from + len, pos_start, in remap_oldmem_pfn_checked()
389 do_munmap(vma->vm_mm, from, len); in remap_oldmem_pfn_checked()
393 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma, in vmcore_remap_oldmem_pfn() argument
[all …]
/linux-4.1.27/arch/m32r/include/asm/
Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0) argument
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
21 #define flush_icache_page(vma,pg) _flush_cache_copyback_all() argument
22 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all() argument
27 #define flush_icache_page(vma,pg) smp_flush_cache_all() argument
28 #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all() argument
35 #define flush_cache_range(vma, start, end) do { } while (0) argument
36 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
42 #define flush_icache_page(vma,pg) _flush_cache_all() argument
43 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all() argument
[all …]
Dtlbflush.h27 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
28 #define flush_tlb_range(vma, start, end) \ argument
29 local_flush_tlb_range(vma, start, end)
34 #define flush_tlb_page(vma, vmaddr) do { } while (0) argument
35 #define flush_tlb_range(vma, start, end) do { } while (0) argument
46 #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page) argument
47 #define flush_tlb_range(vma, start, end) \ argument
48 smp_flush_tlb_range(vma, start, end)
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
Dbase.c30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) in nvkm_vm_map_at() argument
32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at()
35 int big = vma->node->type != mmu->spg_shift; in nvkm_vm_map_at()
36 u32 offset = vma->node->offset + (delta >> 12); in nvkm_vm_map_at()
37 u32 bits = vma->node->type - 12; in nvkm_vm_map_at()
56 mmu->map(vma, pgt, node, pte, len, phys, delta); in nvkm_vm_map_at()
66 delta += (u64)len << vma->node->type; in nvkm_vm_map_at()
74 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, in nvkm_vm_map_sg_table() argument
77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table()
79 int big = vma->node->type != mmu->spg_shift; in nvkm_vm_map_sg_table()
[all …]
Dnv50.c65 vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) in vm_addr() argument
70 if (vma->access & NV_MEM_ACCESS_SYS) in vm_addr()
72 if (!(vma->access & NV_MEM_ACCESS_WO)) in vm_addr()
78 nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, in nv50_vm_map() argument
87 if (nvkm_fb(vma->vm->mmu)->ram->stolen) { in nv50_vm_map()
88 phys += nvkm_fb(vma->vm->mmu)->ram->stolen; in nv50_vm_map()
92 phys = vm_addr(vma, phys, mem->memtype, target); in nv50_vm_map()
107 phys += block << (vma->node->type - 3); in nv50_vm_map()
112 delta += block << (vma->node->type - 3); in nv50_vm_map()
125 nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, in nv50_vm_map_sg() argument
[all …]
Dgf100.c92 gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) in gf100_vm_addr() argument
97 if (vma->access & NV_MEM_ACCESS_SYS) in gf100_vm_addr()
106 gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, in gf100_vm_map() argument
109 u64 next = 1 << (vma->node->type - 8); in gf100_vm_map()
111 phys = gf100_vm_addr(vma, phys, mem->memtype, 0); in gf100_vm_map()
115 struct nvkm_ltc *ltc = nvkm_ltc(vma->vm->mmu); in gf100_vm_map()
131 gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt, in gf100_vm_map_sg() argument
134 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; in gf100_vm_map_sg()
140 u64 phys = gf100_vm_addr(vma, *list++, memtype, target); in gf100_vm_map_sg()
/linux-4.1.27/drivers/gpu/drm/exynos/
Dexynos_drm_gem.c57 struct vm_area_struct *vma) in update_vm_cache_attr() argument
63 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in update_vm_cache_attr()
65 vma->vm_page_prot = in update_vm_cache_attr()
66 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
68 vma->vm_page_prot = in update_vm_cache_attr()
69 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
80 struct vm_area_struct *vma, in exynos_drm_gem_map_buf() argument
107 return vm_insert_mixed(vma, f_vaddr, pfn); in exynos_drm_gem_map_buf()
322 struct vm_area_struct *vma) in exynos_drm_gem_mmap_buffer() argument
329 vma->vm_flags &= ~VM_PFNMAP; in exynos_drm_gem_mmap_buffer()
[all …]
Dexynos_drm_gem.h74 struct vm_area_struct *vma; member
145 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
148 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
150 static inline int vma_is_io(struct vm_area_struct *vma) in vma_is_io() argument
152 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); in vma_is_io()
156 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
159 void exynos_gem_put_vma(struct vm_area_struct *vma);
165 struct vm_area_struct *vma);
170 struct vm_area_struct *vma);
/linux-4.1.27/arch/metag/mm/
Dhugetlbpage.c35 struct vm_area_struct *vma; in prepare_hugepage_range() local
44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); in prepare_hugepage_range()
45 if (vma && !(vma->vm_flags & MAP_HUGETLB)) in prepare_hugepage_range()
48 vma = find_vma(mm, addr); in prepare_hugepage_range()
49 if (vma) { in prepare_hugepage_range()
50 if (addr + len > vma->vm_start) in prepare_hugepage_range()
52 if (!(vma->vm_flags & MAP_HUGETLB) && in prepare_hugepage_range()
53 (ALIGN_HUGEPT(addr + len) > vma->vm_start)) in prepare_hugepage_range()
131 struct vm_area_struct *vma; in hugetlb_get_unmapped_area_existing() local
145 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { in hugetlb_get_unmapped_area_existing()
[all …]
Dfault.c53 struct vm_area_struct *vma, *prev_vma; in do_page_fault() local
116 vma = find_vma_prev(mm, address, &prev_vma); in do_page_fault()
118 if (!vma || address < vma->vm_start) in do_page_fault()
123 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
127 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) in do_page_fault()
136 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
173 vma = prev_vma; in do_page_fault()
174 if (vma && (expand_stack(vma, address) == 0)) in do_page_fault()
/linux-4.1.27/arch/metag/include/asm/
Dtlb.h11 #define tlb_start_vma(tlb, vma) \ argument
14 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
17 #define tlb_end_vma(tlb, vma) \ argument
20 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
26 #define tlb_start_vma(tlb, vma) do { } while (0) argument
27 #define tlb_end_vma(tlb, vma) do { } while (0) argument
Dcacheflush.h49 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument
52 flush_cache_mm(vma->vm_mm); in flush_cache_range()
55 static inline void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument
58 flush_cache_mm(vma->vm_mm); in flush_cache_page()
70 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument
93 #define flush_cache_range(vma, start, end) do { } while (0) argument
94 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
97 #define flush_icache_page(vma, pg) do { } while (0) argument
233 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument
242 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
/linux-4.1.27/arch/xtensa/include/asm/
Dtlb.h21 # define tlb_start_vma(tlb,vma) do { } while (0) argument
22 # define tlb_end_vma(tlb,vma) do { } while (0) argument
26 # define tlb_start_vma(tlb, vma) \ argument
29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
32 # define tlb_end_vma(tlb, vma) \ argument
35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
/linux-4.1.27/arch/powerpc/include/asm/
Dtlbflush.h37 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
42 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
49 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
54 #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) argument
57 #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) argument
65 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
66 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
67 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
70 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument
73 flush_tlb_page(vma, vmaddr); in local_flush_tlb_page()
[all …]
Dcacheflush.h22 #define flush_cache_range(vma, start, end) do { } while (0) argument
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
24 #define flush_icache_page(vma, page) do { } while (0) argument
36 extern void flush_icache_user_range(struct vm_area_struct *vma,
55 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
58 flush_icache_user_range(vma, page, vaddr, len); \
60 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dfb.h8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, in fb_pgprotect()
12 vma->vm_end - vma->vm_start, in fb_pgprotect()
13 vma->vm_page_prot); in fb_pgprotect()
Dhugetlb.h86 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
88 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
136 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
140 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush()
141 flush_tlb_page(vma, addr); in huge_ptep_clear_flush()
154 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
164 ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
167 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
190 static inline void flush_hugetlb_page(struct vm_area_struct *vma, in flush_hugetlb_page() argument
/linux-4.1.27/drivers/char/
Dmspec.c145 mspec_open(struct vm_area_struct *vma) in mspec_open() argument
149 vdata = vma->vm_private_data; in mspec_open()
160 mspec_close(struct vm_area_struct *vma) in mspec_close() argument
166 vdata = vma->vm_private_data; in mspec_close()
200 mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in mspec_fault() argument
205 struct vma_data *vdata = vma->vm_private_data; in mspec_fault()
236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); in mspec_fault()
255 mspec_mmap(struct file *file, struct vm_area_struct *vma, in mspec_mmap() argument
261 if (vma->vm_pgoff != 0) in mspec_mmap()
264 if ((vma->vm_flags & VM_SHARED) == 0) in mspec_mmap()
[all …]
Duv_mmtimer.c43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) in uv_mmtimer_mmap() argument
151 if (vma->vm_end - vma->vm_start != PAGE_SIZE) in uv_mmtimer_mmap()
154 if (vma->vm_flags & VM_WRITE) in uv_mmtimer_mmap()
160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in uv_mmtimer_mmap()
166 if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, in uv_mmtimer_mmap()
167 PAGE_SIZE, vma->vm_page_prot)) { in uv_mmtimer_mmap()
Dmem.c303 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument
305 return vma->vm_flags & VM_MAYSHARE; in private_mapping_ok()
309 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument
321 static int mmap_mem(struct file *file, struct vm_area_struct *vma) in mmap_mem() argument
323 size_t size = vma->vm_end - vma->vm_start; in mmap_mem()
325 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in mmap_mem()
328 if (!private_mapping_ok(vma)) in mmap_mem()
331 if (!range_is_allowed(vma->vm_pgoff, size)) in mmap_mem()
334 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, in mmap_mem()
335 &vma->vm_page_prot)) in mmap_mem()
[all …]
Dbsr.c125 static int bsr_mmap(struct file *filp, struct vm_area_struct *vma) in bsr_mmap() argument
127 unsigned long size = vma->vm_end - vma->vm_start; in bsr_mmap()
131 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in bsr_mmap()
135 ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12, in bsr_mmap()
136 vma->vm_page_prot); in bsr_mmap()
138 ret = io_remap_pfn_range(vma, vma->vm_start, in bsr_mmap()
140 size, vma->vm_page_prot); in bsr_mmap()
/linux-4.1.27/arch/arc/include/asm/
Dtlb.h27 #define tlb_start_vma(tlb, vma) argument
29 #define tlb_start_vma(tlb, vma) \ argument
32 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
36 #define tlb_end_vma(tlb, vma) \ argument
39 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
Dtlbflush.h16 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
18 void local_flush_tlb_range(struct vm_area_struct *vma,
22 #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e) argument
23 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
28 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
30 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
Dcacheflush.h30 #define flush_icache_page(vma, page) argument
62 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ argument
68 void flush_cache_range(struct vm_area_struct *vma,
70 void flush_cache_page(struct vm_area_struct *vma,
78 void flush_anon_page(struct vm_area_struct *vma,
110 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
113 if (vma->vm_flags & VM_EXEC) \
117 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/linux-4.1.27/arch/alpha/include/asm/
Dcacheflush.h10 #define flush_cache_range(vma, start, end) do { } while (0) argument
11 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
50 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument
53 if (vma->vm_flags & VM_EXEC) { in flush_icache_user_range()
54 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range()
62 extern void flush_icache_user_range(struct vm_area_struct *vma,
67 #define flush_icache_page(vma, page) \ argument
68 flush_icache_user_range((vma), (page), 0, 0)
70 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
72 flush_icache_user_range(vma, page, vaddr, len); \
[all …]
Dtlbflush.h39 struct vm_area_struct *vma, in ev4_flush_tlb_current_page() argument
43 if (vma->vm_flags & VM_EXEC) { in ev4_flush_tlb_current_page()
52 struct vm_area_struct *vma, in ev5_flush_tlb_current_page() argument
55 if (vma->vm_flags & VM_EXEC) in ev5_flush_tlb_current_page()
117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
119 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
122 flush_tlb_current_page(mm, vma, addr); in flush_tlb_page()
130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in flush_tlb_range() argument
133 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
/linux-4.1.27/arch/parisc/kernel/
Dcache.c78 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in update_mmu_cache() argument
276 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in __flush_cache_page() argument
281 if (vma->vm_flags & VM_EXEC) in __flush_cache_page()
491 struct vm_area_struct *vma; in mm_total_size() local
494 for (vma = mm->mmap; vma; vma = vma->vm_next) in mm_total_size()
495 usize += vma->vm_end - vma->vm_start; in mm_total_size()
516 struct vm_area_struct *vma; in flush_cache_mm() local
527 for (vma = mm->mmap; vma; vma = vma->vm_next) { in flush_cache_mm()
528 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); in flush_cache_mm()
529 if ((vma->vm_flags & VM_EXEC) == 0) in flush_cache_mm()
[all …]
Dpci.c223 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument
242 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range()
244 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range()
246 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range()
247 vma->vm_end - vma->vm_start, vma->vm_page_prot); in pci_mmap_page_range()
/linux-4.1.27/arch/alpha/kernel/
Dpci-sysfs.c18 struct vm_area_struct *vma, in hose_mmap_page_range() argument
28 vma->vm_pgoff += base >> PAGE_SHIFT; in hose_mmap_page_range()
30 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in hose_mmap_page_range()
31 vma->vm_end - vma->vm_start, in hose_mmap_page_range()
32 vma->vm_page_prot); in hose_mmap_page_range()
36 struct vm_area_struct *vma, int sparse) in __pci_mmap_fits() argument
41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in __pci_mmap_fits()
42 start = vma->vm_pgoff; in __pci_mmap_fits()
65 struct vm_area_struct *vma, int sparse) in pci_mmap_resource() argument
80 if (!__pci_mmap_fits(pdev, i, vma, sparse)) in pci_mmap_resource()
[all …]
/linux-4.1.27/arch/sparc/include/asm/
Dcacheflush_64.h23 #define flush_cache_range(vma, start, end) \ argument
24 flush_cache_mm((vma)->vm_mm)
25 #define flush_cache_page(vma, page, pfn) \ argument
26 flush_cache_mm((vma)->vm_mm)
50 #define flush_icache_page(vma, pg) do { } while(0) argument
51 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
[all …]
Dtlb_32.h4 #define tlb_start_vma(tlb, vma) \ argument
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
9 #define tlb_end_vma(tlb, vma) \ argument
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
Dcacheflush_32.h12 #define flush_cache_range(vma,start,end) \ argument
13 sparc32_cachetlb_ops->cache_range(vma, start, end)
14 #define flush_cache_page(vma,addr,pfn) \ argument
15 sparc32_cachetlb_ops->cache_page(vma, addr)
17 #define flush_icache_page(vma, pg) do { } while (0) argument
19 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
23 flush_cache_page(vma, vaddr, page_to_pfn(page));\
26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
Dtlbflush_32.h10 #define flush_tlb_range(vma, start, end) \ argument
11 sparc32_cachetlb_ops->tlb_range(vma, start, end)
12 #define flush_tlb_page(vma, addr) \ argument
13 sparc32_cachetlb_ops->tlb_page(vma, addr)
/linux-4.1.27/arch/arm/mm/
Dfault-armv.c40 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, in do_adjust_pte() argument
56 flush_cache_page(vma, address, pfn); in do_adjust_pte()
61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte()
62 flush_tlb_page(vma, address); in do_adjust_pte()
92 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, in adjust_pte() argument
102 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte()
119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
123 ret = do_adjust_pte(vma, address, pfn, pte); in adjust_pte()
132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, in make_coherent() argument
135 struct mm_struct *mm = vma->vm_mm; in make_coherent()
[all …]
Dflush.c67 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in flush_cache_range() argument
70 vivt_flush_cache_range(vma, start, end); in flush_cache_range()
82 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
86 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) in flush_cache_page() argument
89 vivt_flush_cache_page(vma, user_addr, pfn); in flush_cache_page()
98 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) in flush_cache_page()
147 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
151 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access()
153 if (vma->vm_flags & VM_EXEC) in flush_ptrace_access()
173 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
[all …]
/linux-4.1.27/arch/arc/kernel/
Darc_hostlink.c21 static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma) in arc_hl_mmap() argument
23 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in arc_hl_mmap()
25 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in arc_hl_mmap()
26 vma->vm_end - vma->vm_start, in arc_hl_mmap()
27 vma->vm_page_prot)) { in arc_hl_mmap()
/linux-4.1.27/arch/ia64/include/asm/
Dfb.h9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
12 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) in fb_pgprotect()
13 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in fb_pgprotect()
15 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in fb_pgprotect()
Dcacheflush.h22 #define flush_cache_range(vma, start, end) do { } while (0) argument
23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
24 #define flush_icache_page(vma,page) do { } while (0) argument
41 #define flush_icache_user_range(vma, page, user_addr, len) \ argument
47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
49 flush_icache_user_range(vma, page, vaddr, len); \
51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dtlbflush.h69 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
75 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
78 flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); in flush_tlb_page()
80 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
83 vma->vm_mm->context = 0; in flush_tlb_page()
/linux-4.1.27/arch/parisc/mm/
Dfault.c182 struct vm_area_struct *vma) in show_signal_msg() argument
194 if (vma) in show_signal_msg()
196 vma->vm_start, vma->vm_end); in show_signal_msg()
204 struct vm_area_struct *vma, *prev_vma; in do_page_fault() local
228 vma = find_vma_prev(mm, address, &prev_vma); in do_page_fault()
229 if (!vma || address < vma->vm_start) in do_page_fault()
238 if ((vma->vm_flags & acc_type) != acc_type) in do_page_fault()
247 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
287 vma = prev_vma; in do_page_fault()
288 if (vma && (expand_stack(vma, address) == 0)) in do_page_fault()
[all …]
/linux-4.1.27/drivers/sbus/char/
Dflash.c36 flash_mmap(struct file *file, struct vm_area_struct *vma) in flash_mmap() argument
46 if ((vma->vm_flags & VM_READ) && in flash_mmap()
47 (vma->vm_flags & VM_WRITE)) { in flash_mmap()
51 if (vma->vm_flags & VM_READ) { in flash_mmap()
54 } else if (vma->vm_flags & VM_WRITE) { in flash_mmap()
64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) in flash_mmap()
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); in flash_mmap()
68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) in flash_mmap()
69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); in flash_mmap()
71 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in flash_mmap()
[all …]
/linux-4.1.27/arch/avr32/include/asm/
Dtlb.h11 #define tlb_start_vma(tlb, vma) \ argument
12 flush_cache_range(vma, vma->vm_start, vma->vm_end)
14 #define tlb_end_vma(tlb, vma) \ argument
15 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
Dcacheflush.h91 #define flush_cache_range(vma, start, end) do { } while (0) argument
92 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
103 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
121 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
125 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
/linux-4.1.27/arch/parisc/include/asm/
Dtlb.h9 #define tlb_start_vma(tlb, vma) \ argument
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
14 #define tlb_end_vma(tlb, vma) \ argument
16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
Dcacheflush.h82 #define flush_icache_page(vma,page) do { \ argument
92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
105 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
106 void flush_cache_range(struct vm_area_struct *vma,
114 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) in flush_anon_page() argument
117 flush_tlb_page(vma, vmaddr); in flush_anon_page()
/linux-4.1.27/kernel/events/
Duprobes.c129 static bool valid_vma(struct vm_area_struct *vma, bool is_register) in valid_vma() argument
136 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
139 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument
141 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
144 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument
146 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
160 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument
163 struct mm_struct *mm = vma->vm_mm; in __replace_page()
172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
186 page_add_new_anon_rmap(kpage, vma, addr); in __replace_page()
[all …]
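offset_to_vaddr() and vaddr_to_offset() above are the two directions of one linear relation between a file offset and a user virtual address. A worked example with invented numbers, assuming 4 KiB pages: for vm_start = 0x400000 and vm_pgoff = 2, a probe at file offset 0x3010 maps to 0x400000 + 0x3010 - (2 << 12) = 0x401010, and vaddr_to_offset(0x401010) returns (2 << 12) + (0x401010 - 0x400000) = 0x3010.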
/linux-4.1.27/drivers/gpu/drm/udl/
Dudl_gem.c61 struct vm_area_struct *vma) in update_vm_cache_attr() argument
67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in update_vm_cache_attr()
69 vma->vm_page_prot = in update_vm_cache_attr()
70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
72 vma->vm_page_prot = in update_vm_cache_attr()
73 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr()
87 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in udl_drm_gem_mmap() argument
91 ret = drm_gem_mmap(filp, vma); in udl_drm_gem_mmap()
95 vma->vm_flags &= ~VM_PFNMAP; in udl_drm_gem_mmap()
96 vma->vm_flags |= VM_MIXEDMAP; in udl_drm_gem_mmap()
[all …]
/linux-4.1.27/arch/arm/include/asm/
Dcacheflush.h177 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
232 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vivt_flush_cache_range() argument
234 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range()
238 vma->vm_flags); in vivt_flush_cache_range()
242 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) in vivt_flush_cache_page() argument
244 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page()
248 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); in vivt_flush_cache_page()
255 #define flush_cache_range(vma,start,end) \ argument
256 vivt_flush_cache_range(vma,start,end)
257 #define flush_cache_page(vma,addr,pfn) \ argument
[all …]
Dtlbflush.h419 __local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in __local_flush_tlb_page() argument
424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page()
427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in __local_flush_tlb_page()
441 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in local_flush_tlb_page() argument
445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page()
450 __local_flush_tlb_page(vma, uaddr); in local_flush_tlb_page()
458 __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in __flush_tlb_page() argument
462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
467 __local_flush_tlb_page(vma, uaddr); in __flush_tlb_page()
603 #define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) argument
[all …]
Dtlb.h72 struct vm_area_struct *vma; member
99 if (tlb->fullmm || !tlb->vma) in tlb_flush()
102 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); in tlb_flush()
157 tlb->vma = NULL; in tlb_gather_mmu()
195 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
198 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma()
199 tlb->vma = vma; in tlb_start_vma()
206 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_end_vma() argument
/linux-4.1.27/arch/sh/include/asm/
Dtlbflush.h15 extern void local_flush_tlb_range(struct vm_area_struct *vma,
18 extern void local_flush_tlb_page(struct vm_area_struct *vma,
30 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
40 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
43 #define flush_tlb_range(vma, start, end) \ argument
44 local_flush_tlb_range(vma, start, end)
Dtlb.h74 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument
77 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma()
81 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_end_vma() argument
84 flush_tlb_range(vma, tlb->start, tlb->end); in tlb_end_vma()
122 static inline void tlb_wire_entry(struct vm_area_struct *vma , in tlb_wire_entry() argument
136 #define tlb_start_vma(tlb, vma) do { } while (0) argument
137 #define tlb_end_vma(tlb, vma) do { } while (0) argument
Dcacheflush.h41 extern void flush_cache_page(struct vm_area_struct *vma,
43 extern void flush_cache_range(struct vm_area_struct *vma,
48 extern void flush_icache_page(struct vm_area_struct *vma,
53 struct vm_area_struct *vma; member
60 static inline void flush_anon_page(struct vm_area_struct *vma, in flush_anon_page() argument
81 extern void copy_to_user_page(struct vm_area_struct *vma,
85 extern void copy_from_user_page(struct vm_area_struct *vma,
/linux-4.1.27/arch/ia64/mm/
Dfault.c83 struct vm_area_struct *vma, *prev_vma; in ia64_do_page_fault() local
127 vma = find_vma_prev(mm, address, &prev_vma); in ia64_do_page_fault()
128 if (!vma && !prev_vma ) in ia64_do_page_fault()
138 if (( !vma && prev_vma ) || (address < vma->vm_start) ) in ia64_do_page_fault()
151 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) in ia64_do_page_fault()
154 if ((vma->vm_flags & mask) != mask) in ia64_do_page_fault()
162 fault = handle_mm_fault(mm, vma, address, flags); in ia64_do_page_fault()
207 if (!vma) in ia64_do_page_fault()
209 if (!(vma->vm_flags & VM_GROWSDOWN)) in ia64_do_page_fault()
211 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) in ia64_do_page_fault()
[all …]
Dinit.c107 struct vm_area_struct *vma; in ia64_init_addr_space() local
116 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in ia64_init_addr_space()
117 if (vma) { in ia64_init_addr_space()
118 INIT_LIST_HEAD(&vma->anon_vma_chain); in ia64_init_addr_space()
119 vma->vm_mm = current->mm; in ia64_init_addr_space()
120 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; in ia64_init_addr_space()
121 vma->vm_end = vma->vm_start + PAGE_SIZE; in ia64_init_addr_space()
122 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; in ia64_init_addr_space()
123 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in ia64_init_addr_space()
125 if (insert_vm_struct(current->mm, vma)) { in ia64_init_addr_space()
[all …]
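ia64_init_addr_space() above is one of the few sites that builds a vm_area_struct by hand rather than through mmap. The recurring sequence in this 4.1-era code is: allocate from vm_area_cachep, fill in vm_mm/vm_start/vm_end/vm_flags, derive vm_page_prot from the flags, then insert under mmap_sem. A condensed, hedged sketch of that sequence (error handling trimmed, names illustrative):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>

static void demo_insert_anon_vma(struct mm_struct *mm, unsigned long start)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_mm = mm;
	vma->vm_start = start & PAGE_MASK;
	vma->vm_end = vma->vm_start + PAGE_SIZE;
	vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	down_write(&mm->mmap_sem);
	if (insert_vm_struct(mm, vma))
		kmem_cache_free(vm_area_cachep, vma);
	up_write(&mm->mmap_sem);
}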
/linux-4.1.27/arch/tile/kernel/
Dtlb.c53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, in flush_tlb_page_mm() argument
56 unsigned long size = vma_kernel_pagesize(vma); in flush_tlb_page_mm()
57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_page_mm()
62 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) in flush_tlb_page() argument
64 flush_tlb_page_mm(vma, vma->vm_mm, va); in flush_tlb_page()
68 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
71 unsigned long size = vma_kernel_pagesize(vma); in flush_tlb_range()
72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_range()
/linux-4.1.27/drivers/infiniband/hw/ipath/
Dipath_mmap.c64 static void ipath_vma_open(struct vm_area_struct *vma) in ipath_vma_open() argument
66 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_open()
71 static void ipath_vma_close(struct vm_area_struct *vma) in ipath_vma_close() argument
73 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_close()
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in ipath_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in ipath_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; in ipath_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); in ipath_mmap()
118 vma->vm_ops = &ipath_vm_ops; in ipath_mmap()
119 vma->vm_private_data = ip; in ipath_mmap()
[all …]
/linux-4.1.27/drivers/infiniband/hw/qib/
Dqib_mmap.c64 static void qib_vma_open(struct vm_area_struct *vma) in qib_vma_open() argument
66 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_open()
71 static void qib_vma_close(struct vm_area_struct *vma) in qib_vma_close() argument
73 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_close()
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in qib_mmap() argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in qib_mmap()
93 unsigned long size = vma->vm_end - vma->vm_start; in qib_mmap()
115 ret = remap_vmalloc_range(vma, ip->obj, 0); in qib_mmap()
118 vma->vm_ops = &qib_vm_ops; in qib_mmap()
119 vma->vm_private_data = ip; in qib_mmap()
[all …]
Dqib_file_ops.c729 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, in qib_mmap_mem() argument
736 if ((vma->vm_end - vma->vm_start) > len) { in qib_mmap_mem()
739 vma->vm_end - vma->vm_start, len); in qib_mmap_mem()
749 if (vma->vm_flags & VM_WRITE) { in qib_mmap_mem()
757 vma->vm_flags &= ~VM_MAYWRITE; in qib_mmap_mem()
761 ret = remap_pfn_range(vma, vma->vm_start, pfn, in qib_mmap_mem()
762 len, vma->vm_page_prot); in qib_mmap_mem()
771 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, in mmap_ureg() argument
784 if ((vma->vm_end - vma->vm_start) > sz) { in mmap_ureg()
787 vma->vm_end - vma->vm_start); in mmap_ureg()
[all …]
/linux-4.1.27/arch/score/include/asm/
Dcacheflush.h9 extern void flush_cache_range(struct vm_area_struct *vma,
11 extern void flush_cache_page(struct vm_area_struct *vma,
28 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument
31 if (vma->vm_flags & VM_EXEC) { in flush_icache_page()
38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
44 if ((vma->vm_flags & VM_EXEC)) \
45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
Dtlbflush.h17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
27 #define flush_tlb_range(vma, vmaddr, end) \ argument
28 local_flush_tlb_range(vma, vmaddr, end)
31 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
/linux-4.1.27/arch/arm64/include/asm/
Dtlbflush.h85 static inline void flush_tlb_page(struct vm_area_struct *vma, in flush_tlb_page() argument
89 ((unsigned long)ASID(vma->vm_mm) << 48); in flush_tlb_page()
96 static inline void __flush_tlb_range(struct vm_area_struct *vma, in __flush_tlb_range() argument
99 unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; in __flush_tlb_range()
129 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
133 __flush_tlb_range(vma, start, end); in flush_tlb_range()
135 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
162 static inline void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument
172 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) argument
/linux-4.1.27/arch/sh/mm/
Dcache-sh5.c87 static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr) in sh64_icache_inv_user_page() argument
113 vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page()
177 struct vm_area_struct *vma; in sh64_icache_inv_user_page_range() local
179 vma = find_vma(mm, aligned_start); in sh64_icache_inv_user_page_range()
180 if (!vma || (aligned_start <= vma->vm_end)) { in sh64_icache_inv_user_page_range()
185 vma_end = vma->vm_end; in sh64_icache_inv_user_page_range()
186 if (vma->vm_flags & VM_EXEC) { in sh64_icache_inv_user_page_range()
190 sh64_icache_inv_user_page(vma, eaddr); in sh64_icache_inv_user_page_range()
194 aligned_start = vma->vm_end; /* Skip to start of next region */ in sh64_icache_inv_user_page_range()
528 struct vm_area_struct *vma; in sh5_flush_cache_range() local
[all …]
Dcache.c58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
73 if (vma->vm_flags & VM_EXEC) in copy_to_user_page()
74 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page()
77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument
94 unsigned long vaddr, struct vm_area_struct *vma) in copy_user_highpage() argument
112 (vma->vm_flags & VM_EXEC)) in copy_user_highpage()
134 void __update_cache(struct vm_area_struct *vma, in __update_cache() argument
191 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, in flush_cache_page() argument
196 data.vma = vma; in flush_cache_page()
203 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument
[all …]
Dmmap.c37 struct vm_area_struct *vma; in arch_get_unmapped_area() local
64 vma = find_vma(mm, addr); in arch_get_unmapped_area()
66 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area()
84 struct vm_area_struct *vma; in arch_get_unmapped_area_topdown() local
114 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
116 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area_topdown()
Dcache-sh4.c207 struct vm_area_struct *vma; in sh4_flush_cache_page() local
217 vma = data->vma; in sh4_flush_cache_page()
223 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page()
226 pgd = pgd_offset(vma->vm_mm, address); in sh4_flush_cache_page()
235 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page()
256 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_page()
279 struct vm_area_struct *vma; in sh4_flush_cache_range() local
282 vma = data->vma; in sh4_flush_cache_range()
286 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range()
298 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_range()
Dtlbflush_32.c15 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
/linux-4.1.27/arch/tile/include/asm/
Dtlbflush.h45 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument
53 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) in local_flush_tlb_page()
58 static inline void local_flush_tlb_pages(struct vm_area_struct *vma, in local_flush_tlb_pages() argument
67 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) in local_flush_tlb_pages()
Dcacheflush.h29 #define flush_cache_range(vma, start, end) do { } while (0) argument
30 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
37 #define flush_icache_page(vma, pg) do { } while (0) argument
38 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) argument
64 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument
69 if (vma->vm_flags & VM_EXEC) { in copy_to_user_page()
75 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dhugetlb.h67 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
70 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush()
89 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
93 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags()
115 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, in arch_make_huge_pte() argument
118 size_t pagesize = huge_page_size(hstate_vma(vma)); in arch_make_huge_pte()
/linux-4.1.27/arch/mips/include/asm/
Dtlbflush.h17 extern void local_flush_tlb_range(struct vm_area_struct *vma,
21 extern void local_flush_tlb_page(struct vm_area_struct *vma,
29 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
39 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) argument
42 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
Dtlb.h8 #define tlb_start_vma(tlb, vma) \ argument
11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
13 #define tlb_end_vma(tlb, vma) do { } while (0) argument
Dcacheflush.h50 extern void (*flush_cache_range)(struct vm_area_struct *vma,
52 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
69 static inline void flush_anon_page(struct vm_area_struct *vma, in flush_anon_page() argument
76 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument
100 extern void copy_to_user_page(struct vm_area_struct *vma,
104 extern void copy_from_user_page(struct vm_area_struct *vma,
Dhugetlb.h71 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument
74 flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma))); in huge_ptep_clear_flush()
94 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument
102 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
107 flush_tlb_range(vma, addr, addr + HPAGE_SIZE); in huge_ptep_set_access_flags()
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
Dkfd_doorbell.c120 int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) in kfd_doorbell_mmap() argument
129 if (vma->vm_end - vma->vm_start != doorbell_process_allocation()) in kfd_doorbell_mmap()
133 dev = kfd_device_by_id(vma->vm_pgoff); in kfd_doorbell_mmap()
140 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | in kfd_doorbell_mmap()
143 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in kfd_doorbell_mmap()
150 (unsigned long long) vma->vm_start, address, vma->vm_flags, in kfd_doorbell_mmap()
154 return io_remap_pfn_range(vma, in kfd_doorbell_mmap()
155 vma->vm_start, in kfd_doorbell_mmap()
158 vma->vm_page_prot); in kfd_doorbell_mmap()
/linux-4.1.27/include/asm-generic/
Dcacheflush.h14 #define flush_cache_range(vma, start, end) do { } while (0) argument
15 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
21 #define flush_icache_page(vma,pg) do { } while (0) argument
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
29 flush_icache_user_range(vma, page, vaddr, len); \
31 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dpgtable.h27 extern int ptep_set_access_flags(struct vm_area_struct *vma,
33 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
39 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, in ptep_test_and_clear_young() argument
48 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young()
55 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument
64 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young()
68 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument
79 int ptep_clear_flush_young(struct vm_area_struct *vma,
84 int pmdp_clear_flush_young(struct vm_area_struct *vma,
150 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
[all …]
/linux-4.1.27/arch/m32r/mm/
Dfault.c79 struct vm_area_struct * vma; in do_page_fault() local
145 vma = find_vma(mm, address); in do_page_fault()
146 if (!vma) in do_page_fault()
148 if (vma->vm_start <= address) in do_page_fault()
150 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
164 if (expand_stack(vma, address)) in do_page_fault()
176 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
182 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault()
189 if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC)) in do_page_fault()
199 fault = handle_mm_fault(mm, vma, addr, flags); in do_page_fault()
[all …]
/linux-4.1.27/arch/arm64/mm/
Dflush.c30 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument
33 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
41 if (vma->vm_flags & VM_EXEC) { in flush_ptrace_access()
59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
67 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
110 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, in pmdp_splitting_flush() argument
116 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
/linux-4.1.27/arch/cris/arch-v32/drivers/pci/
Dbios.c17 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument
25 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range()
26 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range()
31 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range()
32 vma->vm_end - vma->vm_start, in pci_mmap_page_range()
33 vma->vm_page_prot)) in pci_mmap_page_range()
/linux-4.1.27/arch/hexagon/include/asm/
Dcacheflush.h44 #define flush_cache_range(vma, start, end) do { } while (0) argument
45 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
50 #define flush_icache_page(vma, pg) do { } while (0) argument
51 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) argument
86 static inline void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument
92 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
95 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/linux-4.1.27/arch/nios2/kernel/
Dsys_nios2.c24 struct vm_area_struct *vma; in sys_cacheflush() local
41 vma = find_vma(current->mm, addr); in sys_cacheflush()
42 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) in sys_cacheflush()
45 flush_cache_range(vma, addr, addr + len); in sys_cacheflush()
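sys_cacheflush() above shows the standard guard for a user-supplied address range: look the start address up with find_vma() and confirm the whole [addr, addr + len) span lies inside that single VMA before acting on it. A hedged sketch of the same check (helper name invented; the caller is assumed to hold mmap_sem for reading, as in the 4.1 locking scheme):

#include <linux/mm.h>
#include <linux/sched.h>

/* Returns 0 if [addr, addr + len) is fully contained in one VMA. */
static int demo_check_user_range(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma = find_vma(current->mm, addr);

	/* find_vma() returns the first VMA with vm_end > addr, so a start
	 * below vm_start means the address falls in a hole. */
	if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
		return -EFAULT;
	return 0;
}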
/linux-4.1.27/arch/m68k/include/asm/
Dfb.h11 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
14 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; in fb_pgprotect()
17 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument
21 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; in fb_pgprotect()
23 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; in fb_pgprotect()
25 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; in fb_pgprotect()
Dcacheflush_mm.h205 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument
209 if (vma->vm_mm == current->mm) in flush_cache_range()
213 static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long… in flush_cache_page() argument
215 if (vma->vm_mm == current->mm) in flush_cache_page()
254 #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page)) argument
256 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
260 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument
264 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page()
266 flush_icache_user_range(vma, page, vaddr, len); in copy_to_user_page()
268 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
[all …]
Dcacheflush_no.h13 #define flush_cache_range(vma, start, end) do { } while (0) argument
14 #define flush_cache_page(vma, vmaddr) do { } while (0) argument
21 #define flush_icache_page(vma,pg) do { } while (0) argument
22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
28 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dtlbflush.h84 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
86 if (vma->vm_mm == current->active_mm) { in flush_tlb_page()
94 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
97 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
170 static inline void flush_tlb_page (struct vm_area_struct *vma, in flush_tlb_page() argument
177 sun3_put_context(vma->vm_mm->context); in flush_tlb_page()
191 static inline void flush_tlb_range (struct vm_area_struct *vma, in flush_tlb_range() argument
194 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
260 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
/linux-4.1.27/arch/frv/include/asm/
Dcacheflush.h25 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0) argument
77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument
87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument
89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); in flush_icache_page()
96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
99 flush_icache_user_range((vma), (page), (vaddr), (len)); \
102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
Dtlbflush.h42 #define flush_tlb_range(vma,start,end) \ argument
45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \
49 #define flush_tlb_page(vma,addr) \ argument
52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \
66 #define flush_tlb_page(vma,addr) BUG() argument
/linux-4.1.27/arch/hexagon/mm/
Dvm_fault.c50 struct vm_area_struct *vma; in do_page_fault() local
71 vma = find_vma(mm, address); in do_page_fault()
72 if (!vma) in do_page_fault()
75 if (vma->vm_start <= address) in do_page_fault()
78 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
81 if (expand_stack(vma, address)) in do_page_fault()
90 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
94 if (!(vma->vm_flags & VM_READ)) in do_page_fault()
98 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
104 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/ipc/
Dshm.c68 static void shm_open(struct vm_area_struct *vma);
69 static void shm_close(struct vm_area_struct *vma);
190 static int __shm_open(struct vm_area_struct *vma) in __shm_open() argument
192 struct file *file = vma->vm_file; in __shm_open()
209 static void shm_open(struct vm_area_struct *vma) in shm_open() argument
211 int err = __shm_open(vma); in shm_open()
269 static void shm_close(struct vm_area_struct *vma) in shm_close() argument
271 struct file *file = vma->vm_file; in shm_close()
376 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in shm_fault() argument
378 struct file *file = vma->vm_file; in shm_fault()
[all …]
/linux-4.1.27/arch/sparc/mm/
Dfault_32.c170 struct vm_area_struct *vma; in do_sparc_fault() local
210 vma = find_vma(mm, address); in do_sparc_fault()
211 if (!vma) in do_sparc_fault()
213 if (vma->vm_start <= address) in do_sparc_fault()
215 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_sparc_fault()
217 if (expand_stack(vma, address)) in do_sparc_fault()
226 if (!(vma->vm_flags & VM_WRITE)) in do_sparc_fault()
230 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_sparc_fault()
244 fault = handle_mm_fault(mm, vma, address, flags); in do_sparc_fault()
386 struct vm_area_struct *vma; in force_user_fault() local
[all …]
Dsrmmu.c545 extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned lon…
546 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
552 extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long …
553 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
559 extern void swift_flush_cache_range(struct vm_area_struct *vma,
561 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
567 extern void swift_flush_tlb_range(struct vm_area_struct *vma,
569 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
572 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
577 if ((ctx1 = vma->vm_mm->context) != -1) {
[all …]
/linux-4.1.27/arch/unicore32/mm/
Dflush.c23 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument
26 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
30 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, in flush_cache_page() argument
35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument
39 if (vma->vm_flags & VM_EXEC) { in flush_ptrace_access()
53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
58 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
/linux-4.1.27/arch/arm/kernel/
Dsmp_tlb.c156 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in flush_tlb_page() argument
160 ta.ta_vma = vma; in flush_tlb_page()
162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page()
165 __flush_tlb_page(vma, uaddr); in flush_tlb_page()
166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page()
180 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
185 ta.ta_vma = vma; in flush_tlb_range()
188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range()
191 local_flush_tlb_range(vma, start, end); in flush_tlb_range()
192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
/linux-4.1.27/arch/ia64/pci/
Dpci.c590 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument
593 unsigned long size = vma->vm_end - vma->vm_start; in pci_mmap_page_range()
609 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in pci_mmap_page_range()
612 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, in pci_mmap_page_range()
613 vma->vm_page_prot); in pci_mmap_page_range()
623 efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) in pci_mmap_page_range()
624 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_page_range()
626 vma->vm_page_prot = prot; in pci_mmap_page_range()
628 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range()
629 vma->vm_end - vma->vm_start, vma->vm_page_prot)) in pci_mmap_page_range()
[all …]
/linux-4.1.27/drivers/xen/xenfs/
Dxenstored.c33 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) in xsd_kva_mmap() argument
35 size_t size = vma->vm_end - vma->vm_start; in xsd_kva_mmap()
37 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) in xsd_kva_mmap()
40 if (remap_pfn_range(vma, vma->vm_start, in xsd_kva_mmap()
42 size, vma->vm_page_prot)) in xsd_kva_mmap()
/linux-4.1.27/arch/powerpc/mm/
Dcopro_fault.c39 struct vm_area_struct *vma; in copro_handle_mm_fault() local
51 vma = find_vma(mm, ea); in copro_handle_mm_fault()
52 if (!vma) in copro_handle_mm_fault()
55 if (ea < vma->vm_start) { in copro_handle_mm_fault()
56 if (!(vma->vm_flags & VM_GROWSDOWN)) in copro_handle_mm_fault()
58 if (expand_stack(vma, ea)) in copro_handle_mm_fault()
64 if (!(vma->vm_flags & VM_WRITE)) in copro_handle_mm_fault()
67 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in copro_handle_mm_fault()
78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); in copro_handle_mm_fault()
Dsubpage-prot.c137 struct vm_area_struct *vma = walk->vma; in subpage_walk_pmd_entry() local
138 split_huge_page_pmd(vma, addr, pmd); in subpage_walk_pmd_entry()
145 struct vm_area_struct *vma; in subpage_mark_vma_nohuge() local
155 vma = find_vma(mm, addr); in subpage_mark_vma_nohuge()
159 if (vma && ((addr + len) <= vma->vm_start)) in subpage_mark_vma_nohuge()
162 while (vma) { in subpage_mark_vma_nohuge()
163 if (vma->vm_start >= (addr + len)) in subpage_mark_vma_nohuge()
165 vma->vm_flags |= VM_NOHUGEPAGE; in subpage_mark_vma_nohuge()
166 walk_page_vma(vma, &subpage_proto_walk); in subpage_mark_vma_nohuge()
167 vma = vma->vm_next; in subpage_mark_vma_nohuge()
Dfault.c212 struct vm_area_struct * vma; in do_page_fault() local
332 vma = find_vma(mm, address); in do_page_fault()
333 if (!vma) in do_page_fault()
335 if (vma->vm_start <= address) in do_page_fault()
337 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
349 if (address + 0x100000 < vma->vm_end) { in do_page_fault()
370 if (expand_stack(vma, address)) in do_page_fault()
402 if (!(vma->vm_flags & VM_EXEC) && in do_page_fault()
404 !(vma->vm_flags & (VM_READ | VM_WRITE)))) in do_page_fault()
416 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
[all …]
Dpgtable.c88 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, in set_access_flags_filter() argument
128 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, in set_access_flags_filter() argument
146 if (WARN_ON(!(vma->vm_flags & VM_EXEC))) in set_access_flags_filter()
200 int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, in ptep_set_access_flags() argument
204 entry = set_access_flags_filter(entry, vma, dirty); in ptep_set_access_flags()
207 if (!is_vm_hugetlb_page(vma)) in ptep_set_access_flags()
208 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
210 flush_tlb_page_nohash(vma, address); in ptep_set_access_flags()
/linux-4.1.27/arch/nios2/mm/
Dcacheflush.c148 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument
152 if (vma == NULL || (vma->vm_flags & VM_EXEC)) in flush_cache_range()
156 void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument
165 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in flush_cache_page() argument
172 if (vma->vm_flags & VM_EXEC) in flush_cache_page()
216 void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument
241 if (vma->vm_flags & VM_EXEC) in update_mmu_cache()
242 flush_icache_page(vma, page); in update_mmu_cache()
265 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); in copy_from_user_page()
[all …]
Dfault.c45 struct vm_area_struct *vma = NULL; in do_page_fault() local
93 vma = find_vma(mm, address); in do_page_fault()
94 if (!vma) in do_page_fault()
96 if (vma->vm_start <= address) in do_page_fault()
98 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
100 if (expand_stack(vma, address)) in do_page_fault()
115 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
119 if (!(vma->vm_flags & VM_READ)) in do_page_fault()
123 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
134 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/xtensa/kernel/
Dpci.c286 __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma, in __pci_mmap_make_offset() argument
290 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in __pci_mmap_make_offset()
329 vma->vm_pgoff = offset >> PAGE_SHIFT; in __pci_mmap_make_offset()
341 __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, in __pci_mmap_set_pgprot() argument
344 int prot = pgprot_val(vma->vm_page_prot); in __pci_mmap_set_pgprot()
352 vma->vm_page_prot = __pgprot(prot); in __pci_mmap_set_pgprot()
365 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument
371 ret = __pci_mmap_make_offset(dev, vma, mmap_state); in pci_mmap_page_range()
375 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); in pci_mmap_page_range()
377 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range()
[all …]
Dsmp.c442 struct vm_area_struct *vma; member
470 local_flush_tlb_page(fd->vma, fd->addr1); in ipi_flush_tlb_page()
473 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
476 .vma = vma, in flush_tlb_page()
485 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in ipi_flush_tlb_range()
488 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
492 .vma = vma, in flush_tlb_range()
529 local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); in ipi_flush_cache_page()
532 void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument
536 .vma = vma, in flush_cache_page()
[all …]
/linux-4.1.27/fs/ext2/
Dfile.c29 static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in ext2_dax_fault() argument
31 return dax_fault(vma, vmf, ext2_get_block, NULL); in ext2_dax_fault()
34 static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) in ext2_dax_mkwrite() argument
36 return dax_mkwrite(vma, vmf, ext2_get_block, NULL); in ext2_dax_mkwrite()
45 static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) in ext2_file_mmap() argument
48 return generic_file_mmap(file, vma); in ext2_file_mmap()
51 vma->vm_ops = &ext2_dax_vm_ops; in ext2_file_mmap()
52 vma->vm_flags |= VM_MIXEDMAP; in ext2_file_mmap()
/linux-4.1.27/arch/m68k/mm/
Dfault.c73 struct vm_area_struct * vma; in do_page_fault() local
92 vma = find_vma(mm, address); in do_page_fault()
93 if (!vma) in do_page_fault()
95 if (vma->vm_flags & VM_IO) in do_page_fault()
97 if (vma->vm_start <= address) in do_page_fault()
99 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
109 if (expand_stack(vma, address)) in do_page_fault()
122 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
129 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) in do_page_fault()
139 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/drivers/misc/sgi-gru/
Dgrufile.c73 static void gru_vma_close(struct vm_area_struct *vma) in gru_vma_close() argument
79 if (!vma->vm_private_data) in gru_vma_close()
82 vdata = vma->vm_private_data; in gru_vma_close()
83 vma->vm_private_data = NULL; in gru_vma_close()
84 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, in gru_vma_close()
107 static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) in gru_file_mmap() argument
109 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) in gru_file_mmap()
112 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || in gru_file_mmap()
113 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) in gru_file_mmap()
116 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | in gru_file_mmap()
[all …]
Dgrufault.c63 struct vm_area_struct *vma; in gru_find_vma() local
65 vma = find_vma(current->mm, vaddr); in gru_find_vma()
66 if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) in gru_find_vma()
67 return vma; in gru_find_vma()
82 struct vm_area_struct *vma; in gru_find_lock_gts() local
86 vma = gru_find_vma(vaddr); in gru_find_lock_gts()
87 if (vma) in gru_find_lock_gts()
88 gts = gru_find_thread_state(vma, TSID(vaddr, vma)); in gru_find_lock_gts()
99 struct vm_area_struct *vma; in gru_alloc_locked_gts() local
103 vma = gru_find_vma(vaddr); in gru_alloc_locked_gts()
[all …]
/linux-4.1.27/arch/arc/mm/
Dtlb.c274 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument
288 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
301 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
303 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
348 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument
358 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
359 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
406 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in flush_tlb_page() argument
409 .ta_vma = vma, in flush_tlb_page()
413 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
[all …]
Dfault.c58 struct vm_area_struct *vma = NULL; in do_page_fault() local
96 vma = find_vma(mm, address); in do_page_fault()
97 if (!vma) in do_page_fault()
99 if (vma->vm_start <= address) in do_page_fault()
101 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
103 if (expand_stack(vma, address)) in do_page_fault()
120 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
124 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault()
133 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/microblaze/kernel/
Ddma.c157 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap_coherent() argument
162 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in dma_direct_mmap_coherent()
164 unsigned long off = vma->vm_pgoff; in dma_direct_mmap_coherent()
171 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in dma_direct_mmap_coherent()
176 return remap_pfn_range(vma, vma->vm_start, pfn + off, in dma_direct_mmap_coherent()
177 vma->vm_end - vma->vm_start, vma->vm_page_prot); in dma_direct_mmap_coherent()
/linux-4.1.27/fs/
Dexec.c190 ret = expand_downwards(bprm->vma, pos); in get_arg_page()
201 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; in get_arg_page()
246 flush_cache_page(bprm->vma, pos, page_to_pfn(page)); in flush_arg_page()
252 struct vm_area_struct *vma = NULL; in __bprm_mm_init() local
255 bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in __bprm_mm_init()
256 if (!vma) in __bprm_mm_init()
260 vma->vm_mm = mm; in __bprm_mm_init()
269 vma->vm_end = STACK_TOP_MAX; in __bprm_mm_init()
270 vma->vm_start = vma->vm_end - PAGE_SIZE; in __bprm_mm_init()
271 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; in __bprm_mm_init()
[all …]
/linux-4.1.27/arch/cris/include/asm/
Dcacheflush.h13 #define flush_cache_range(vma, start, end) do { } while (0) argument
14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument
20 #define flush_icache_page(vma,pg) do { } while (0) argument
21 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument
25 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/linux-4.1.27/arch/tile/mm/
Delf.c45 struct vm_area_struct *vma; in notify_exec() local
64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { in notify_exec()
65 if (vma == NULL) { in notify_exec()
69 if (vma->vm_file == exe_file) in notify_exec()
78 if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) { in notify_exec()
82 snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start); in notify_exec()
/linux-4.1.27/arch/mips/kernel/
Dsmp.c350 struct vm_area_struct *vma; member
359 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi()
362 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in flush_tlb_range() argument
364 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
369 .vma = vma, in flush_tlb_range()
383 local_flush_tlb_range(vma, start, end); in flush_tlb_range()
408 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi()
411 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in flush_tlb_page() argument
414 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { in flush_tlb_page()
416 .vma = vma, in flush_tlb_page()
[all …]
/linux-4.1.27/arch/mn10300/unit-asb2305/
Dpci-asb2305.c216 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument
224 vma->vm_flags |= VM_LOCKED; in pci_mmap_page_range()
226 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range()
228 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range()
231 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range()
232 vma->vm_end - vma->vm_start, in pci_mmap_page_range()
233 vma->vm_page_prot)) in pci_mmap_page_range()
/linux-4.1.27/arch/sh/kernel/
Dsmp.c376 struct vm_area_struct *vma; member
385 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi()
388 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument
391 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
397 fd.vma = vma; in flush_tlb_range()
407 local_flush_tlb_range(vma, start, end); in flush_tlb_range()
431 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi()
434 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in flush_tlb_page() argument
437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
438 (current->mm != vma->vm_mm)) { in flush_tlb_page()
[all …]
/linux-4.1.27/arch/nios2/include/asm/
Dtlb.h22 #define tlb_start_vma(tlb, vma) \ argument
25 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
28 #define tlb_end_vma(tlb, vma) do { } while (0) argument
Dcacheflush.h26 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
28 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
39 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
42 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
/linux-4.1.27/drivers/vfio/platform/
Dvfio_platform_common.c418 struct vm_area_struct *vma) in vfio_platform_mmap_mmio() argument
422 req_len = vma->vm_end - vma->vm_start; in vfio_platform_mmap_mmio()
423 pgoff = vma->vm_pgoff & in vfio_platform_mmap_mmio()
430 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in vfio_platform_mmap_mmio()
431 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; in vfio_platform_mmap_mmio()
433 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in vfio_platform_mmap_mmio()
434 req_len, vma->vm_page_prot); in vfio_platform_mmap_mmio()
437 static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma) in vfio_platform_mmap() argument
442 index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT); in vfio_platform_mmap()
444 if (vma->vm_end < vma->vm_start) in vfio_platform_mmap()
[all …]
/linux-4.1.27/arch/powerpc/oprofile/cell/
Dvma_map.c37 vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma, in vma_map_lookup() argument
47 u32 offset = 0x10000000 + vma; in vma_map_lookup()
51 if (vma < map->vma || vma >= map->vma + map->size) in vma_map_lookup()
60 offset = vma - map->vma + map->offset; in vma_map_lookup()
68 vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma, in vma_map_add() argument
82 new->vma = vma; in vma_map_add()
272 map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset, in create_vma_map()
/linux-4.1.27/drivers/video/fbdev/core/
Dfb_defio.c40 static int fb_deferred_io_fault(struct vm_area_struct *vma, in fb_deferred_io_fault() argument
45 struct fb_info *info = vma->vm_private_data; in fb_deferred_io_fault()
57 if (vma->vm_file) in fb_deferred_io_fault()
58 page->mapping = vma->vm_file->f_mapping; in fb_deferred_io_fault()
94 static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, in fb_deferred_io_mkwrite() argument
98 struct fb_info *info = vma->vm_private_data; in fb_deferred_io_mkwrite()
108 file_update_time(vma->vm_file); in fb_deferred_io_mkwrite()
167 static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) in fb_deferred_io_mmap() argument
169 vma->vm_ops = &fb_deferred_io_vm_ops; in fb_deferred_io_mmap()
170 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in fb_deferred_io_mmap()
[all …]
/linux-4.1.27/drivers/uio/
Duio.c579 static int uio_find_mem_index(struct vm_area_struct *vma) in uio_find_mem_index() argument
581 struct uio_device *idev = vma->vm_private_data; in uio_find_mem_index()
583 if (vma->vm_pgoff < MAX_UIO_MAPS) { in uio_find_mem_index()
584 if (idev->info->mem[vma->vm_pgoff].size == 0) in uio_find_mem_index()
586 return (int)vma->vm_pgoff; in uio_find_mem_index()
591 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in uio_vma_fault() argument
593 struct uio_device *idev = vma->vm_private_data; in uio_vma_fault()
598 int mi = uio_find_mem_index(vma); in uio_vma_fault()
622 static int uio_mmap_logical(struct vm_area_struct *vma) in uio_mmap_logical() argument
624 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in uio_mmap_logical()
[all …]
/linux-4.1.27/drivers/staging/unisys/visorchipset/
Dfile.c61 visorchipset_mmap(struct file *file, struct vm_area_struct *vma) in visorchipset_mmap() argument
64 ulong offset = vma->vm_pgoff << PAGE_SHIFT; in visorchipset_mmap()
73 vma->vm_flags |= VM_IO; in visorchipset_mmap()
85 if (remap_pfn_range(vma, vma->vm_start, in visorchipset_mmap()
87 vma->vm_end - vma->vm_start, in visorchipset_mmap()
89 (vma->vm_page_prot))) { in visorchipset_mmap()
/linux-4.1.27/arch/microblaze/include/asm/
Dcacheflush.h64 #define flush_icache_user_range(vma, pg, adr, len) flush_icache(); argument
65 #define flush_icache_page(vma, pg) do { } while (0) argument
92 #define flush_cache_page(vma, vmaddr, pfn) \ argument
97 #define flush_cache_range(vma, start, len) { \
103 #define flush_cache_range(vma, start, len) do { } while (0) argument
105 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument
111 if (vma->vm_flags & VM_EXEC) { in copy_to_user_page()
117 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
/linux-4.1.27/arch/microblaze/mm/
Dfault.c89 struct vm_area_struct *vma; in do_page_fault() local
149 vma = find_vma(mm, address); in do_page_fault()
150 if (unlikely(!vma)) in do_page_fault()
153 if (vma->vm_start <= address) in do_page_fault()
156 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) in do_page_fault()
171 if (unlikely(address + 0x100000 < vma->vm_end)) { in do_page_fault()
194 if (expand_stack(vma, address)) in do_page_fault()
202 if (unlikely(!(vma->vm_flags & VM_WRITE))) in do_page_fault()
210 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) in do_page_fault()
219 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/drivers/xen/xenbus/
Dxenbus_dev_backend.c93 static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) in xenbus_backend_mmap() argument
95 size_t size = vma->vm_end - vma->vm_start; in xenbus_backend_mmap()
100 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) in xenbus_backend_mmap()
103 if (remap_pfn_range(vma, vma->vm_start, in xenbus_backend_mmap()
105 size, vma->vm_page_prot)) in xenbus_backend_mmap()
/linux-4.1.27/fs/kernfs/
Dfile.c328 static void kernfs_vma_open(struct vm_area_struct *vma) in kernfs_vma_open() argument
330 struct file *file = vma->vm_file; in kernfs_vma_open()
340 of->vm_ops->open(vma); in kernfs_vma_open()
345 static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in kernfs_vma_fault() argument
347 struct file *file = vma->vm_file; in kernfs_vma_fault()
359 ret = of->vm_ops->fault(vma, vmf); in kernfs_vma_fault()
365 static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma, in kernfs_vma_page_mkwrite() argument
368 struct file *file = vma->vm_file; in kernfs_vma_page_mkwrite()
380 ret = of->vm_ops->page_mkwrite(vma, vmf); in kernfs_vma_page_mkwrite()
388 static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, in kernfs_vma_access() argument
[all …]
/linux-4.1.27/arch/avr32/mm/
Dfault.c60 struct vm_area_struct *vma; in do_page_fault() local
94 vma = find_vma(mm, address); in do_page_fault()
95 if (!vma) in do_page_fault()
97 if (vma->vm_start <= address) in do_page_fault()
99 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
101 if (expand_stack(vma, address)) in do_page_fault()
114 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
119 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in do_page_fault()
124 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
137 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/openrisc/mm/
Dfault.c54 struct vm_area_struct *vma; in do_page_fault() local
112 vma = find_vma(mm, address); in do_page_fault()
114 if (!vma) in do_page_fault()
117 if (vma->vm_start <= address) in do_page_fault()
120 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
133 if (expand_stack(vma, address)) in do_page_fault()
147 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
152 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault()
157 if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC)) in do_page_fault()
166 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/alpha/mm/
Dfault.c87 struct vm_area_struct * vma; in do_page_fault() local
121 vma = find_vma(mm, address); in do_page_fault()
122 if (!vma) in do_page_fault()
124 if (vma->vm_start <= address) in do_page_fault()
126 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
128 if (expand_stack(vma, address)) in do_page_fault()
136 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
140 if (!(vma->vm_flags & (VM_READ | VM_WRITE))) in do_page_fault()
143 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
151 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/drivers/gpu/drm/vgem/
Dvgem_drv.c89 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in vgem_gem_fault() argument
91 struct drm_vgem_gem_object *obj = vma->vm_private_data; in vgem_gem_fault()
98 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> in vgem_gem_fault()
108 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, in vgem_gem_fault()
238 int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in vgem_drm_gem_mmap() argument
250 vma->vm_pgoff, in vgem_drm_gem_mmap()
251 vma_pages(vma)); in vgem_drm_gem_mmap()
265 ret = dma_buf_mmap(obj->dma_buf, vma, 0); in vgem_drm_gem_mmap()
274 vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; in vgem_drm_gem_mmap()
275 vma->vm_ops = obj->dev->driver->gem_vm_ops; in vgem_drm_gem_mmap()
[all …]
/linux-4.1.27/arch/um/drivers/
Dmmapper_kern.c48 static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) in mmapper_mmap() argument
53 if (vma->vm_pgoff != 0) in mmapper_mmap()
56 size = vma->vm_end - vma->vm_start; in mmapper_mmap()
64 if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, in mmapper_mmap()
65 vma->vm_page_prot)) in mmapper_mmap()
/linux-4.1.27/arch/metag/kernel/
Ddma.c335 static int dma_mmap(struct device *dev, struct vm_area_struct *vma, in dma_mmap() argument
343 user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in dma_mmap()
350 unsigned long off = vma->vm_pgoff; in dma_mmap()
356 ret = remap_pfn_range(vma, vma->vm_start, in dma_mmap()
359 vma->vm_page_prot); in dma_mmap()
367 int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, in dma_mmap_coherent() argument
370 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in dma_mmap_coherent()
371 return dma_mmap(dev, vma, cpu_addr, dma_addr, size); in dma_mmap_coherent()
375 int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, in dma_mmap_writecombine() argument
378 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in dma_mmap_writecombine()
[all …]
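The arch dma_mmap() helpers above clamp the request to the allocation and remap it with an uncached or write-combined protection. A sketch of that shape, assuming the buffer sits in the kernel linear map so virt_to_phys() is meaningful (many platforms need a different translation for coherent memory):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    /* Illustrative only: clamp the mmap request, then remap write-combined. */
    static int example_dma_mmap(struct vm_area_struct *vma, void *cpu_addr,
                                size_t size)
    {
            unsigned long user_pages = vma_pages(vma);
            unsigned long pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
            unsigned long off = vma->vm_pgoff;

            if (off >= pages || user_pages > pages - off)
                    return -ENXIO;          /* request is outside the buffer */

            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start,
                                   (virt_to_phys(cpu_addr) >> PAGE_SHIFT) + off,
                                   user_pages << PAGE_SHIFT, vma->vm_page_prot);
    }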
/linux-4.1.27/arch/powerpc/kernel/
Dproc_powerpc.c44 static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) in page_map_mmap() argument
46 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) in page_map_mmap()
49 remap_pfn_range(vma, vma->vm_start, in page_map_mmap()
51 PAGE_SIZE, vma->vm_page_prot); in page_map_mmap()
/linux-4.1.27/arch/mips/mm/
Dtlb-r3k.c78 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument
81 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
157 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument
161 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
166 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page()
168 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; in local_flush_tlb_page()
188 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) in __update_tlb() argument
196 if (current->active_mm != vma->vm_mm) in __update_tlb()
202 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb()
204 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
Dc-octeon.c65 static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) in octeon_flush_icache_all_cores() argument
83 if (vma) in octeon_flush_icache_all_cores()
84 mask = *mm_cpumask(vma->vm_mm); in octeon_flush_icache_all_cores()
138 struct vm_area_struct *vma; in octeon_flush_cache_sigtramp() local
141 vma = find_vma(current->mm, addr); in octeon_flush_cache_sigtramp()
142 octeon_flush_icache_all_cores(vma); in octeon_flush_cache_sigtramp()
154 static void octeon_flush_cache_range(struct vm_area_struct *vma, in octeon_flush_cache_range() argument
157 if (vma->vm_flags & VM_EXEC) in octeon_flush_cache_range()
158 octeon_flush_icache_all_cores(vma); in octeon_flush_cache_range()
169 static void octeon_flush_cache_page(struct vm_area_struct *vma, in octeon_flush_cache_page() argument
[all …]
Dfault.c42 struct vm_area_struct * vma = NULL; in __do_page_fault() local
104 vma = find_vma(mm, address); in __do_page_fault()
105 if (!vma) in __do_page_fault()
107 if (vma->vm_start <= address) in __do_page_fault()
109 if (!(vma->vm_flags & VM_GROWSDOWN)) in __do_page_fault()
111 if (expand_stack(vma, address)) in __do_page_fault()
121 if (!(vma->vm_flags & VM_WRITE)) in __do_page_fault()
126 if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) { in __do_page_fault()
136 if (!(vma->vm_flags & VM_READ)) { in __do_page_fault()
147 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in __do_page_fault()
[all …]
/linux-4.1.27/arch/score/mm/
Dfault.c46 struct vm_area_struct *vma = NULL; in do_page_fault() local
83 vma = find_vma(mm, address); in do_page_fault()
84 if (!vma) in do_page_fault()
86 if (vma->vm_start <= address) in do_page_fault()
88 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
90 if (expand_stack(vma, address)) in do_page_fault()
100 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
104 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in do_page_fault()
113 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/unicore32/include/asm/
Dcacheflush.h120 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
140 extern void flush_cache_range(struct vm_area_struct *vma,
142 extern void flush_cache_page(struct vm_area_struct *vma,
152 #define flush_cache_user_range(vma, start, end) \ argument
187 #define flush_icache_user_range(vma, page, addr, len) \ argument
194 #define flush_icache_page(vma, page) do { } while (0) argument
Dtlbflush.h88 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in local_flush_tlb_page() argument
90 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in local_flush_tlb_page()
170 #define local_flush_tlb_range(vma, start, end) \ argument
171 __cpu_flush_user_tlb_range(start, end, vma)
187 extern void update_mmu_cache(struct vm_area_struct *vma,
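The cacheflush.h/tlbflush.h hooks declared above are driven by core mm in a fixed order around a PTE update (see Documentation/cachetlb.txt). An illustrative sketch of that ordering; the function itself is hypothetical:

    #include <linux/mm.h>
    #include <asm/cacheflush.h>
    #include <asm/tlbflush.h>

    /* Illustrative only: the documented sequence for rewriting a user PTE. */
    static void example_change_user_pte(struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *ptep,
                                        pte_t newpte)
    {
            flush_cache_page(vma, addr, pte_pfn(*ptep)); /* write back old data */
            set_pte_at(vma->vm_mm, addr, ptep, newpte);  /* install the new PTE */
            flush_tlb_page(vma, addr);                   /* drop the stale TLB entry */
            update_mmu_cache(vma, addr, ptep);           /* let the arch preload */
    }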
/linux-4.1.27/drivers/misc/cxl/
Dcontext.c103 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in cxl_mmap_fault() argument
105 struct cxl_context *ctx = vma->vm_file->private_data; in cxl_mmap_fault()
132 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); in cxl_mmap_fault()
146 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) in cxl_context_iomap() argument
148 u64 len = vma->vm_end - vma->vm_start; in cxl_context_iomap()
166 vma->vm_flags |= VM_IO | VM_PFNMAP; in cxl_context_iomap()
167 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in cxl_context_iomap()
168 vma->vm_ops = &cxl_mmap_vmops; in cxl_context_iomap()
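cxl_context_iomap() and cxl_mmap_fault() illustrate the on-demand MMIO mapping pattern: mmap() marks the VMA as an uncached PFN map and installs vm_ops, and .fault inserts the PFN when the page is first touched. A minimal sketch, where mmio_base and the example_* names are assumptions:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Illustrative only: on-demand MMIO mapping via vm_insert_pfn(). */
    static phys_addr_t mmio_base;

    static int example_mmio_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            unsigned long addr = (unsigned long)vmf->virtual_address;
            unsigned long offset = addr - vma->vm_start;

            vm_insert_pfn(vma, addr, (mmio_base + offset) >> PAGE_SHIFT);
            return VM_FAULT_NOPAGE;
    }

    static const struct vm_operations_struct example_mmio_vmops = {
            .fault = example_mmio_fault,
    };

    static int example_mmio_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_flags |= VM_IO | VM_PFNMAP;                      /* not page-backed RAM */
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* uncached MMIO */
            vma->vm_ops = &example_mmio_vmops;
            return 0;
    }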
/linux-4.1.27/arch/cris/mm/
Dfault.c58 struct vm_area_struct * vma; in do_page_fault() local
123 vma = find_vma(mm, address); in do_page_fault()
124 if (!vma) in do_page_fault()
126 if (vma->vm_start <= address) in do_page_fault()
128 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
140 if (expand_stack(vma, address)) in do_page_fault()
154 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
157 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
161 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault()
171 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/xtensa/mm/
Dfault.c39 struct vm_area_struct * vma; in do_page_fault() local
79 vma = find_vma(mm, address); in do_page_fault()
81 if (!vma) in do_page_fault()
83 if (vma->vm_start <= address) in do_page_fault()
85 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
87 if (expand_stack(vma, address)) in do_page_fault()
98 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
102 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault()
105 if (!(vma->vm_flags & (VM_READ | VM_WRITE))) in do_page_fault()
112 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
Dcache.c108 unsigned long vaddr, struct vm_area_struct *vma) in copy_user_highpage() argument
185 void local_flush_cache_range(struct vm_area_struct *vma, in local_flush_cache_range() argument
199 void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, in local_flush_cache_page() argument
214 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) in update_mmu_cache() argument
226 flush_tlb_page(vma, addr); in update_mmu_cache()
244 && (vma->vm_flags & VM_EXEC) != 0) { in update_mmu_cache()
261 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument
288 if ((vma->vm_flags & VM_EXEC) != 0) in copy_to_user_page()
291 } else if ((vma->vm_flags & VM_EXEC) != 0) { in copy_to_user_page()
297 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument
/linux-4.1.27/drivers/gpu/drm/rockchip/
Drockchip_drm_gem.c58 struct vm_area_struct *vma) in rockchip_drm_gem_object_mmap() argument
69 vma->vm_flags &= ~VM_PFNMAP; in rockchip_drm_gem_object_mmap()
71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, in rockchip_drm_gem_object_mmap()
74 drm_gem_vm_close(vma); in rockchip_drm_gem_object_mmap()
80 struct vm_area_struct *vma) in rockchip_gem_mmap_buf() argument
86 ret = drm_gem_mmap_obj(obj, obj->size, vma); in rockchip_gem_mmap_buf()
91 return rockchip_drm_gem_object_mmap(obj, vma); in rockchip_gem_mmap_buf()
95 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) in rockchip_gem_mmap() argument
100 ret = drm_gem_mmap(filp, vma); in rockchip_gem_mmap()
104 obj = vma->vm_private_data; in rockchip_gem_mmap()
[all …]
/linux-4.1.27/drivers/misc/mic/host/
Dmic_fops.c192 mic_mmap(struct file *f, struct vm_area_struct *vma) in mic_mmap() argument
195 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in mic_mmap()
196 unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; in mic_mmap()
203 if (vma->vm_flags & VM_WRITE) in mic_mmap()
210 err = remap_pfn_range(vma, vma->vm_start + offset, in mic_mmap()
211 pa >> PAGE_SHIFT, size, vma->vm_page_prot); in mic_mmap()
217 pa, vma->vm_start + offset); in mic_mmap()
/linux-4.1.27/arch/x86/mm/
Dmpx.c31 struct vm_area_struct *vma; in mpx_mmap() local
64 vma = find_vma(mm, ret); in mpx_mmap()
65 if (!vma) { in mpx_mmap()
620 struct vm_area_struct *vma; in zap_bt_entries() local
628 vma = find_vma(mm, start); in zap_bt_entries()
629 if (!vma || vma->vm_start > start) in zap_bt_entries()
639 while (vma && vma->vm_start < end) { in zap_bt_entries()
646 if (!(vma->vm_flags & VM_MPX)) in zap_bt_entries()
649 len = min(vma->vm_end, end) - addr; in zap_bt_entries()
650 zap_page_range(vma, addr, len, NULL); in zap_bt_entries()
[all …]
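zap_bt_entries() above walks every VMA overlapping a range. A sketch of that walk as it looks on 4.1, where VMAs are still chained through vm_next; the function name and the -EINVAL policy are illustrative:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>

    /* Illustrative only: visit each VMA that overlaps [start, end). */
    static int example_walk_range(struct mm_struct *mm, unsigned long start,
                                  unsigned long end)
    {
            struct vm_area_struct *vma;
            unsigned long addr = start;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, start);
            if (!vma || vma->vm_start > start) {
                    up_read(&mm->mmap_sem);
                    return -EINVAL;         /* range does not start inside a VMA */
            }
            while (vma && vma->vm_start < end) {
                    unsigned long len = min(vma->vm_end, end) - addr;

                    /* ... operate on [addr, addr + len) of this vma ... */

                    addr = vma->vm_end;
                    vma = vma->vm_next;
            }
            up_read(&mm->mmap_sem);
            return 0;
    }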
Dpgtable.c409 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument
417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags()
424 int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument
434 pmd_update_defer(vma->vm_mm, address, pmdp); in pmdp_set_access_flags()
447 int ptep_test_and_clear_young(struct vm_area_struct *vma, in ptep_test_and_clear_young() argument
457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young()
463 int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument
473 pmd_update(vma->vm_mm, addr, pmdp); in pmdp_test_and_clear_young()
479 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument
495 return ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young()
[all …]
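The primitives above are wrapped by the generic ptep_clear_flush_young(), which flushes the TLB entry only when the accessed bit was actually set; the x86 override visible in the pgtable.c hits skips the flush and simply returns the test-and-clear result. A sketch of the generic behaviour:

    #include <linux/mm.h>
    #include <asm/pgtable.h>
    #include <asm/tlbflush.h>

    /* Illustrative only: clear the accessed bit, flushing only if it was set. */
    static int example_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
    {
            int young = ptep_test_and_clear_young(vma, address, ptep);

            if (young)
                    flush_tlb_page(vma, address);
            return young;
    }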
Dhugetlbpage.c27 struct vm_area_struct *vma;
29 vma = find_vma(mm, addr);
30 if (!vma || !is_vm_hugetlb_page(vma))
130 struct vm_area_struct *vma; in hugetlb_get_unmapped_area() local
145 vma = find_vma(mm, addr); in hugetlb_get_unmapped_area()
147 (!vma || addr + len <= vma->vm_start)) in hugetlb_get_unmapped_area()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
Dfile.c236 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in spufs_mem_mmap_fault() argument
238 struct spu_context *ctx = vma->vm_file->private_data; in spufs_mem_mmap_fault()
247 psize = get_slice_psize(vma->vm_mm, address); in spufs_mem_mmap_fault()
254 BUG_ON(vma->vm_start & 0xffff); in spufs_mem_mmap_fault()
270 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); in spufs_mem_mmap_fault()
273 vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); in spufs_mem_mmap_fault()
276 vm_insert_pfn(vma, address, pfn); in spufs_mem_mmap_fault()
283 static int spufs_mem_mmap_access(struct vm_area_struct *vma, in spufs_mem_mmap_access() argument
287 struct spu_context *ctx = vma->vm_file->private_data; in spufs_mem_mmap_access()
288 unsigned long offset = address - vma->vm_start; in spufs_mem_mmap_access()
[all …]
/linux-4.1.27/arch/mn10300/mm/
Dfault.c121 struct vm_area_struct *vma; in do_page_fault() local
179 vma = find_vma(mm, address); in do_page_fault()
180 if (!vma) in do_page_fault()
182 if (vma->vm_start <= address) in do_page_fault()
184 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault()
197 vma->vm_start, vma->vm_end); in do_page_fault()
218 if (expand_stack(vma, address)) in do_page_fault()
236 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault()
247 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault()
257 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
/linux-4.1.27/arch/c6x/include/asm/
Dcacheflush.h29 #define flush_cache_page(vma, vmaddr, pfn) do {} while (0) argument
46 #define flush_icache_page(vma, page) \ argument
48 if ((vma)->vm_flags & PROT_EXEC) \
56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument
62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
/linux-4.1.27/arch/x86/kernel/
Dsys_x86_64.c130 struct vm_area_struct *vma; in arch_get_unmapped_area() local
144 vma = find_vma(mm, addr); in arch_get_unmapped_area()
146 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area()
168 struct vm_area_struct *vma; in arch_get_unmapped_area_topdown() local
187 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
189 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area_topdown()
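Both arch_get_unmapped_area() variants above begin with the same hint check: honour the caller's address only if it leaves no overlap with an existing VMA. A sketch of just that check; the function name is illustrative:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /*
     * Illustrative only: find_vma() returns the first VMA whose vm_end lies
     * above addr, so "addr + len <= vma->vm_start" means the gap is free.
     */
    static unsigned long example_check_hint(struct mm_struct *mm,
                                            unsigned long addr, unsigned long len)
    {
            struct vm_area_struct *vma;

            if (!addr)
                    return 0;               /* no hint given */
            addr = PAGE_ALIGN(addr);
            vma = find_vma(mm, addr);
            if (TASK_SIZE - len >= addr &&
                (!vma || addr + len <= vma->vm_start))
                    return addr;            /* hint is usable as-is */
            return 0;                       /* caller falls back to a fresh search */
    }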
