| /linux-4.4.14/drivers/gpu/drm/ |
| D | drm_vm.c | 49 struct vm_area_struct *vma; member 53 static void drm_vm_open(struct vm_area_struct *vma); 54 static void drm_vm_close(struct vm_area_struct *vma); 57 struct vm_area_struct *vma) in drm_io_prot() argument 59 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_io_prot() 67 if (efi_range_is_wc(vma->vm_start, vma->vm_end - in drm_io_prot() 68 vma->vm_start)) in drm_io_prot() 78 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) in drm_dma_prot() argument 80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); in drm_dma_prot() 99 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in drm_do_vm_fault() argument [all …]
|
| D | drm_gem_cma_helper.c | 313 struct vm_area_struct *vma) in drm_gem_cma_mmap_obj() argument 322 vma->vm_flags &= ~VM_PFNMAP; in drm_gem_cma_mmap_obj() 323 vma->vm_pgoff = 0; in drm_gem_cma_mmap_obj() 325 ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma, in drm_gem_cma_mmap_obj() 327 vma->vm_end - vma->vm_start); in drm_gem_cma_mmap_obj() 329 drm_gem_vm_close(vma); in drm_gem_cma_mmap_obj() 349 int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) in drm_gem_cma_mmap() argument 355 ret = drm_gem_mmap(filp, vma); in drm_gem_cma_mmap() 359 gem_obj = vma->vm_private_data; in drm_gem_cma_mmap() 362 return drm_gem_cma_mmap_obj(cma_obj, vma); in drm_gem_cma_mmap() [all …]
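The drm_gem_cma_mmap_obj() fragment above clears VM_PFNMAP and vm_pgoff before handing the VMA to dma_mmap_writecombine(). A minimal sketch of that pattern, assuming the buffer came from an earlier dma_alloc_writecombine(); my_mmap_coherent and its parameters are hypothetical names, not part of the helper:

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int my_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr)
{
	/* Map from the start of the buffer, not at the fake GEM offset. */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	return dma_mmap_writecombine(dev, vma, cpu_addr, dma_addr,
				     vma->vm_end - vma->vm_start);
}
```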
|
| /linux-4.4.14/mm/ |
| D | mmap.c | 62 struct vm_area_struct *vma, struct vm_area_struct *prev, 99 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument 101 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot() 103 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot() 104 if (vma_wants_writenotify(vma)) { in vma_set_page_prot() 106 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, in vma_set_page_prot() 238 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument 241 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct() 243 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct() 247 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct() [all …]
|
| D | mremap.c | 53 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument 89 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument 96 struct mm_struct *mm = vma->vm_mm; in move_ptes() 119 if (vma->vm_file) { in move_ptes() 120 mapping = vma->vm_file->f_mapping; in move_ptes() 123 if (vma->anon_vma) { in move_ptes() 124 anon_vma = vma->anon_vma; in move_ptes() 163 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument 175 flush_cache_range(vma, old_addr, old_end); in move_page_tables() 179 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables() [all …]
|
| D | nommu.c | 121 struct vm_area_struct *vma; in kobjsize() local 123 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize() 124 if (vma) in kobjsize() 125 return vma->vm_end - vma->vm_start; in kobjsize() 140 struct vm_area_struct *vma; in __get_user_pages() local 153 vma = find_vma(mm, start); in __get_user_pages() 154 if (!vma) in __get_user_pages() 158 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages() 159 !(vm_flags & vma->vm_flags)) in __get_user_pages() 168 vmas[i] = vma; in __get_user_pages() [all …]
|
| D | madvise.c | 46 static long madvise_behavior(struct vm_area_struct *vma, in madvise_behavior() argument 50 struct mm_struct *mm = vma->vm_mm; in madvise_behavior() 53 unsigned long new_flags = vma->vm_flags; in madvise_behavior() 69 if (vma->vm_flags & VM_IO) { in madvise_behavior() 87 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior() 93 error = hugepage_madvise(vma, &new_flags, behavior); in madvise_behavior() 99 if (new_flags == vma->vm_flags) { in madvise_behavior() 100 *prev = vma; in madvise_behavior() 104 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior() 105 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior() [all …]
|
| D | mprotect.c | 41 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, in lock_pte_protection() argument 49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection() 51 pmdl = pmd_lock(vma->vm_mm, pmd); in lock_pte_protection() 57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection() 62 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument 66 struct mm_struct *mm = vma->vm_mm; in change_pte_range() 71 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range() 89 page = vm_normal_page(vma, addr, oldpte); in change_pte_range() 106 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range() 136 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument [all …]
|
| D | memory.c | 529 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument 532 while (vma) { in free_pgtables() 533 struct vm_area_struct *next = vma->vm_next; in free_pgtables() 534 unsigned long addr = vma->vm_start; in free_pgtables() 540 unlink_anon_vmas(vma); in free_pgtables() 541 unlink_file_vma(vma); in free_pgtables() 543 if (is_vm_hugetlb_page(vma)) { in free_pgtables() 544 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables() 550 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables() 552 vma = next; in free_pgtables() [all …]
|
| D | pgtable-generic.c | 47 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument 53 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags() 54 flush_tlb_fix_spurious_fault(vma, address); in ptep_set_access_flags() 61 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument 65 young = ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young() 67 flush_tlb_page(vma, address); in ptep_clear_flush_young() 73 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, in ptep_clear_flush() argument 76 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush() 80 flush_tlb_page(vma, address); in ptep_clear_flush() 98 #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument [all …]
|
| D | rmap.c | 132 static void anon_vma_chain_link(struct vm_area_struct *vma, in anon_vma_chain_link() argument 136 avc->vma = vma; in anon_vma_chain_link() 138 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link() 169 int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument 171 struct anon_vma *anon_vma = vma->anon_vma; in anon_vma_prepare() 176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare() 183 anon_vma = find_mergeable_anon_vma(vma); in anon_vma_prepare() 195 if (likely(!vma->anon_vma)) { in anon_vma_prepare() 196 vma->anon_vma = anon_vma; in anon_vma_prepare() 197 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_prepare() [all …]
|
| D | pagewalk.c | 38 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range() 135 struct vm_area_struct *vma = walk->vma; in walk_hugetlb_range() local 136 struct hstate *h = hstate_vma(vma); in walk_hugetlb_range() 172 struct vm_area_struct *vma = walk->vma; in walk_page_test() local 185 if (vma->vm_flags & VM_PFNMAP) { in walk_page_test() 198 struct vm_area_struct *vma = walk->vma; in __walk_page_range() local 200 if (vma && is_vm_hugetlb_page(vma)) { in __walk_page_range() 244 struct vm_area_struct *vma; in walk_page_range() local 254 vma = find_vma(walk->mm, start); in walk_page_range() 256 if (!vma) { /* after the last vma */ in walk_page_range() [all …]
|
| D | mlock.c | 360 struct vm_area_struct *vma, int zoneid, unsigned long start, in __munlock_pagevec_fill() argument 371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill() 383 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill() 422 void munlock_vma_pages_range(struct vm_area_struct *vma, in munlock_vma_pages_range() argument 425 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in munlock_vma_pages_range() 443 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP, in munlock_vma_pages_range() 474 start = __munlock_pagevec_fill(&pvec, vma, in munlock_vma_pages_range() 498 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, in mlock_fixup() argument 501 struct mm_struct *mm = vma->vm_mm; in mlock_fixup() 507 if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || in mlock_fixup() [all …]
|
| D | huge_memory.c | 699 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument 701 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite() 715 struct vm_area_struct *vma, in __do_huge_pmd_anonymous_page() argument 758 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page() 765 ret = handle_userfault(vma, address, flags, in __do_huge_pmd_anonymous_page() 771 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page() 772 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page() 773 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page() 775 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page() 794 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument [all …]
|
| D | gup.c | 21 static struct page *no_page_table(struct vm_area_struct *vma, in no_page_table() argument 32 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table() 37 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument 52 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte() 53 update_mmu_cache(vma, address, pte); in follow_pfn_pte() 61 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument 64 struct mm_struct *mm = vma->vm_mm; in follow_page_pte() 71 return no_page_table(vma, flags); in follow_page_pte() 100 page = vm_normal_page(vma, address, pte); in follow_page_pte() 113 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte() [all …]
|
| D | msync.c | 35 struct vm_area_struct *vma; in SYSCALL_DEFINE3() local 58 vma = find_vma(mm, start); in SYSCALL_DEFINE3() 65 if (!vma) in SYSCALL_DEFINE3() 68 if (start < vma->vm_start) { in SYSCALL_DEFINE3() 69 start = vma->vm_start; in SYSCALL_DEFINE3() 76 (vma->vm_flags & VM_LOCKED)) { in SYSCALL_DEFINE3() 80 file = vma->vm_file; in SYSCALL_DEFINE3() 81 fstart = (start - vma->vm_start) + in SYSCALL_DEFINE3() 82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in SYSCALL_DEFINE3() 83 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3() [all …]
|
| D | hugetlb.c | 211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument 213 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma() 617 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument 619 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset() 620 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset() 623 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument 626 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index() 633 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument 637 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize() 640 hstate = hstate_vma(vma); in vma_kernel_pagesize() [all …]
|
| D | mempolicy.c | 445 struct vm_area_struct *vma; in mpol_rebind_mm() local 448 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm() 449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); in mpol_rebind_mm() 488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() local 496 split_huge_page_pmd(vma, addr, pmd); in queue_pages_pte_range() 504 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range() 537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb() 567 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument 572 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa() 579 static unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument [all …]
|
| D | vmacache.c | 96 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find() local 98 if (!vma) in vmacache_find() 100 if (WARN_ON_ONCE(vma->vm_mm != mm)) in vmacache_find() 102 if (vma->vm_start <= addr && vma->vm_end > addr) { in vmacache_find() 104 return vma; in vmacache_find() 124 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find_exact() local 126 if (vma && vma->vm_start == start && vma->vm_end == end) { in vmacache_find_exact() 128 return vma; in vmacache_find_exact()
|
| D | mincore.c | 85 struct vm_area_struct *vma, unsigned char *vec) in __mincore_unmapped_range() argument 90 if (vma->vm_file) { in __mincore_unmapped_range() 93 pgoff = linear_page_index(vma, addr); in __mincore_unmapped_range() 95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); in __mincore_unmapped_range() 107 walk->vma, walk->private); in mincore_unmapped_range() 115 struct vm_area_struct *vma = walk->vma; in mincore_pte_range() local 120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mincore_pte_range() 127 __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range() 137 vma, vec); in mincore_pte_range() 175 struct vm_area_struct *vma; in do_mincore() local [all …]
|
| D | ksm.c | 363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) in break_ksm() argument 370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm() 374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm() 414 struct vm_area_struct *vma; in find_mergeable_vma() local 417 vma = find_vma(mm, addr); in find_mergeable_vma() 418 if (!vma || vma->vm_start > addr) in find_mergeable_vma() 420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma() 422 return vma; in find_mergeable_vma() 429 struct vm_area_struct *vma; in break_cow() local 438 vma = find_mergeable_vma(mm, addr); in break_cow() [all …]
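find_mergeable_vma() above shows the usual find_vma() idiom: find_vma() returns the first VMA whose vm_end lies above the address, so the caller must still reject addresses that fall below vm_start. A small sketch of that check, assuming the caller already holds mm->mmap_sem; lookup_exact() is a hypothetical helper:

```c
#include <linux/mm.h>

/* Caller is assumed to hold mm->mmap_sem for reading. */
static struct vm_area_struct *lookup_exact(struct mm_struct *mm,
					   unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || addr < vma->vm_start)
		return NULL;	/* addr falls in an unmapped hole */
	return vma;
}
```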
|
| D | debug.c | 154 void dump_vma(const struct vm_area_struct *vma) in dump_vma() argument 160 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, in dump_vma() 161 vma->vm_prev, vma->vm_mm, in dump_vma() 162 (unsigned long)pgprot_val(vma->vm_page_prot), in dump_vma() 163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, in dump_vma() 164 vma->vm_file, vma->vm_private_data); in dump_vma() 165 dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); in dump_vma()
|
| D | frame_vector.c | 40 struct vm_area_struct *vma; in get_vaddr_frames() local 53 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames() 54 if (!vma) { in get_vaddr_frames() 58 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { in get_vaddr_frames() 71 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { in get_vaddr_frames() 72 err = follow_pfn(vma, start, &nums[ret]); in get_vaddr_frames() 85 if (ret >= nr_frames || start < vma->vm_end) in get_vaddr_frames() 87 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames() 88 } while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP)); in get_vaddr_frames()
|
| /linux-4.4.14/drivers/staging/rdma/ehca/ |
| D | ehca_uverbs.c | 71 static void ehca_mm_open(struct vm_area_struct *vma) in ehca_mm_open() argument 73 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_open() 76 vma->vm_start, vma->vm_end); in ehca_mm_open() 82 vma->vm_start, vma->vm_end); in ehca_mm_open() 84 vma->vm_start, vma->vm_end, *count); in ehca_mm_open() 87 static void ehca_mm_close(struct vm_area_struct *vma) in ehca_mm_close() argument 89 u32 *count = (u32 *)vma->vm_private_data; in ehca_mm_close() 92 vma->vm_start, vma->vm_end); in ehca_mm_close() 97 vma->vm_start, vma->vm_end, *count); in ehca_mm_close() 105 static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, in ehca_mmap_fw() argument [all …]
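ehca_mm_open()/ehca_mm_close() above keep a use count in vm_private_data so the driver knows when the last mapping of a resource goes away. A stripped-down sketch of that open/close pairing (hypothetical my_vm_* names, not the ehca code):

```c
#include <linux/mm.h>

static void my_vm_open(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;

	(*count)++;		/* the VMA was duplicated (fork or split) */
}

static void my_vm_close(struct vm_area_struct *vma)
{
	u32 *count = vma->vm_private_data;

	(*count)--;		/* zero here means the last mapping is gone */
}

static const struct vm_operations_struct my_vm_ops = {
	.open  = my_vm_open,
	.close = my_vm_close,
};
```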
|
| /linux-4.4.14/drivers/gpu/drm/i915/ |
| D | i915_gem_evict.c | 37 mark_free(struct i915_vma *vma, struct list_head *unwind) in mark_free() argument 39 if (vma->pin_count) in mark_free() 42 if (WARN_ON(!list_empty(&vma->exec_list))) in mark_free() 45 list_add(&vma->exec_list, unwind); in mark_free() 46 return drm_mm_scan_add_block(&vma->node); in mark_free() 80 struct i915_vma *vma; in i915_gem_evict_something() local 119 list_for_each_entry(vma, &vm->inactive_list, mm_list) { in i915_gem_evict_something() 120 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something() 128 list_for_each_entry(vma, &vm->active_list, mm_list) { in i915_gem_evict_something() 129 if (mark_free(vma, &unwind_list)) in i915_gem_evict_something() [all …]
|
| D | i915_gem_execbuffer.c | 132 struct i915_vma *vma; in eb_lookup_vmas() local 146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm); in eb_lookup_vmas() 147 if (IS_ERR(vma)) { in eb_lookup_vmas() 149 ret = PTR_ERR(vma); in eb_lookup_vmas() 154 list_add_tail(&vma->exec_list, &eb->vmas); in eb_lookup_vmas() 157 vma->exec_entry = &exec[i]; in eb_lookup_vmas() 159 eb->lut[i] = vma; in eb_lookup_vmas() 162 vma->exec_handle = handle; in eb_lookup_vmas() 163 hlist_add_head(&vma->exec_node, in eb_lookup_vmas() 200 struct i915_vma *vma; in eb_get_vma() local [all …]
|
| D | i915_gem.c | 136 struct i915_vma *vma; in i915_gem_get_aperture_ioctl() local 141 list_for_each_entry(vma, &ggtt->base.active_list, mm_list) in i915_gem_get_aperture_ioctl() 142 if (vma->pin_count) in i915_gem_get_aperture_ioctl() 143 pinned += vma->node.size; in i915_gem_get_aperture_ioctl() 144 list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) in i915_gem_get_aperture_ioctl() 145 if (vma->pin_count) in i915_gem_get_aperture_ioctl() 146 pinned += vma->node.size; in i915_gem_get_aperture_ioctl() 271 struct i915_vma *vma, *next; in drop_pages() local 275 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) in drop_pages() 276 if (i915_vma_unbind(vma)) in drop_pages() [all …]
|
| D | i915_gem_gtt.c | 96 i915_get_ggtt_vma_pages(struct i915_vma *vma); 149 static int ppgtt_bind_vma(struct i915_vma *vma, in ppgtt_bind_vma() argument 156 if (vma->obj->gt_ro) in ppgtt_bind_vma() 159 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, in ppgtt_bind_vma() 165 static void ppgtt_unbind_vma(struct i915_vma *vma) in ppgtt_unbind_vma() argument 167 vma->vm->clear_range(vma->vm, in ppgtt_unbind_vma() 168 vma->node.start, in ppgtt_unbind_vma() 169 vma->obj->base.size, in ppgtt_unbind_vma() 2501 static int ggtt_bind_vma(struct i915_vma *vma, in ggtt_bind_vma() argument 2505 struct drm_i915_gem_object *obj = vma->obj; in ggtt_bind_vma() [all …]
|
| /linux-4.4.14/drivers/xen/ |
| D | privcmd.c | 47 struct vm_area_struct *vma, 198 struct vm_area_struct *vma; member 206 struct vm_area_struct *vma = st->vma; in mmap_gfn_range() local 216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_gfn_range() 219 rc = xen_remap_domain_gfn_range(vma, in mmap_gfn_range() 222 vma->vm_page_prot, in mmap_gfn_range() 236 struct vm_area_struct *vma; in privcmd_ioctl_mmap() local 262 vma = find_vma(mm, msg->va); in privcmd_ioctl_mmap() 265 if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data) in privcmd_ioctl_mmap() 267 vma->vm_private_data = PRIV_VMA_LOCKED; in privcmd_ioctl_mmap() [all …]
|
| D | gntdev.c | 84 struct vm_area_struct *vma; member 241 unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; in find_grant_ptes() 397 static void gntdev_vma_open(struct vm_area_struct *vma) in gntdev_vma_open() argument 399 struct grant_map *map = vma->vm_private_data; in gntdev_vma_open() 401 pr_debug("gntdev_vma_open %p\n", vma); in gntdev_vma_open() 405 static void gntdev_vma_close(struct vm_area_struct *vma) in gntdev_vma_close() argument 407 struct grant_map *map = vma->vm_private_data; in gntdev_vma_close() 408 struct file *file = vma->vm_file; in gntdev_vma_close() 411 pr_debug("gntdev_vma_close %p\n", vma); in gntdev_vma_close() 421 map->vma = NULL; in gntdev_vma_close() [all …]
|
| /linux-4.4.14/fs/proc/ |
| D | task_mmu.c | 129 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) in m_next_vma() argument 131 if (vma == priv->tail_vma) in m_next_vma() 133 return vma->vm_next ?: priv->tail_vma; in m_next_vma() 136 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) in m_cache_vma() argument 139 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; in m_cache_vma() 147 struct vm_area_struct *vma; in m_start() local 167 vma = find_vma(mm, last_addr); in m_start() 168 if (vma && (vma = m_next_vma(priv, vma))) in m_start() 169 return vma; in m_start() 174 for (vma = mm->mmap; pos; pos--) { in m_start() [all …]
|
| D | task_nommu.c | 20 struct vm_area_struct *vma; in task_mem() local 27 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_mem() 29 bytes += kobjsize(vma); in task_mem() 31 region = vma->vm_region; in task_mem() 36 size = vma->vm_end - vma->vm_start; in task_mem() 40 vma->vm_flags & VM_MAYSHARE) { in task_mem() 45 slack = region->vm_end - vma->vm_end; in task_mem() 82 struct vm_area_struct *vma; in task_vsize() local 88 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_vsize() 89 vsize += vma->vm_end - vma->vm_start; in task_vsize() [all …]
|
| D | vmcore.c | 160 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, in remap_oldmem_pfn_range() argument 164 return remap_pfn_range(vma, from, pfn, size, prot); in remap_oldmem_pfn_range() 266 static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in mmap_vmcore_fault() argument 269 struct address_space *mapping = vma->vm_file->f_mapping; in mmap_vmcore_fault() 343 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma, in remap_oldmem_pfn_checked() argument 365 if (remap_oldmem_pfn_range(vma, from + len, in remap_oldmem_pfn_checked() 372 if (remap_oldmem_pfn_range(vma, from + len, in remap_oldmem_pfn_checked() 383 if (remap_oldmem_pfn_range(vma, from + len, pos_start, in remap_oldmem_pfn_checked() 389 do_munmap(vma->vm_mm, from, len); in remap_oldmem_pfn_checked() 393 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma, in vmcore_remap_oldmem_pfn() argument [all …]
|
| /linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
| D | llite_mmap.c | 57 struct vm_area_struct *vma, unsigned long addr, in policy_from_vma() argument 60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + in policy_from_vma() 61 (vma->vm_pgoff << PAGE_CACHE_SHIFT); in policy_from_vma() 69 struct vm_area_struct *vma, *ret = NULL; in our_vma() local 74 for (vma = find_vma(mm, addr); in our_vma() 75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) { in our_vma() 76 if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops && in our_vma() 77 vma->vm_flags & VM_SHARED) { in our_vma() 78 ret = vma; in our_vma() 98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, in ll_fault_io_init() argument [all …]
|
| /linux-4.4.14/drivers/gpu/drm/ttm/ |
| D | ttm_bo_vm.c | 45 struct vm_area_struct *vma, in ttm_bo_vm_fault_idle() argument 69 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault_idle() 86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in ttm_bo_vm_fault() argument 89 vma->vm_private_data; in ttm_bo_vm_fault() 117 up_read(&vma->vm_mm->mmap_sem); in ttm_bo_vm_fault() 160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); in ttm_bo_vm_fault() 177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + in ttm_bo_vm_fault() 178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); in ttm_bo_vm_fault() 179 page_last = vma_pages(vma) + vma->vm_pgoff - in ttm_bo_vm_fault() 192 cvma = *vma; in ttm_bo_vm_fault() [all …]
|
| /linux-4.4.14/arch/m32r/include/asm/ |
| D | cacheflush.h | 13 #define flush_cache_range(vma, start, end) do { } while (0) argument 14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 21 #define flush_icache_page(vma,pg) _flush_cache_copyback_all() argument 22 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_copyback_all() argument 27 #define flush_icache_page(vma,pg) smp_flush_cache_all() argument 28 #define flush_icache_user_range(vma,pg,adr,len) smp_flush_cache_all() argument 35 #define flush_cache_range(vma, start, end) do { } while (0) argument 36 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 42 #define flush_icache_page(vma,pg) _flush_cache_all() argument 43 #define flush_icache_user_range(vma,pg,adr,len) _flush_cache_all() argument [all …]
|
| D | tlbflush.h | 27 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument 28 #define flush_tlb_range(vma, start, end) \ argument 29 local_flush_tlb_range(vma, start, end) 34 #define flush_tlb_page(vma, vmaddr) do { } while (0) argument 35 #define flush_tlb_range(vma, start, end) do { } while (0) argument 46 #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page) argument 47 #define flush_tlb_range(vma, start, end) \ argument 48 smp_flush_tlb_range(vma, start, end)
|
| /linux-4.4.14/fs/ |
| D | userfaultfd.c | 260 int handle_userfault(struct vm_area_struct *vma, unsigned long address, in handle_userfault() argument 263 struct mm_struct *mm = vma->vm_mm; in handle_userfault() 272 ctx = vma->vm_userfaultfd_ctx.ctx; in handle_userfault() 430 struct vm_area_struct *vma, *prev; in userfaultfd_release() local 447 for (vma = mm->mmap; vma; vma = vma->vm_next) { in userfaultfd_release() 449 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ in userfaultfd_release() 450 !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP))); in userfaultfd_release() 451 if (vma->vm_userfaultfd_ctx.ctx != ctx) { in userfaultfd_release() 452 prev = vma; in userfaultfd_release() 455 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); in userfaultfd_release() [all …]
|
| D | exec.c | 196 ret = expand_downwards(bprm->vma, pos); in get_arg_page() 207 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; in get_arg_page() 252 flush_cache_page(bprm->vma, pos, page_to_pfn(page)); in flush_arg_page() 258 struct vm_area_struct *vma = NULL; in __bprm_mm_init() local 261 bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in __bprm_mm_init() 262 if (!vma) in __bprm_mm_init() 266 vma->vm_mm = mm; in __bprm_mm_init() 275 vma->vm_end = STACK_TOP_MAX; in __bprm_mm_init() 276 vma->vm_start = vma->vm_end - PAGE_SIZE; in __bprm_mm_init() 277 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; in __bprm_mm_init() [all …]
|
| D | dax.c | 293 struct vm_area_struct *vma, struct vm_fault *vmf) in dax_insert_mapping() argument 331 error = vm_insert_mixed(vma, vaddr, pfn); in dax_insert_mapping() 355 int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, in __dax_fault() argument 358 struct file *file = vma->vm_file; in __dax_fault() 381 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { in __dax_fault() 411 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); in __dax_fault() 467 error = dax_insert_mapping(inode, &bh, vma, vmf); in __dax_fault() 501 int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, in dax_fault() argument 505 struct super_block *sb = file_inode(vma->vm_file)->i_sb; in dax_fault() 509 file_update_time(vma->vm_file); in dax_fault() [all …]
|
| D | binfmt_elf_fdpic.c | 1217 static int maydump(struct vm_area_struct *vma, unsigned long mm_flags) in maydump() argument 1222 if (vma->vm_flags & VM_IO) { in maydump() 1223 kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags); in maydump() 1230 if (!(vma->vm_flags & VM_READ)) { in maydump() 1231 kdcore("%08lx: %08lx: no (!read)", vma->vm_start, vma->vm_flags); in maydump() 1236 if (vma_is_dax(vma)) { in maydump() 1237 if (vma->vm_flags & VM_SHARED) { in maydump() 1239 kdcore("%08lx: %08lx: %s (DAX shared)", vma->vm_start, in maydump() 1240 vma->vm_flags, dump_ok ? "yes" : "no"); in maydump() 1243 kdcore("%08lx: %08lx: %s (DAX private)", vma->vm_start, in maydump() [all …]
|
| /linux-4.4.14/arch/arc/include/asm/ |
| D | tlbflush.h | 16 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 18 void local_flush_tlb_range(struct vm_area_struct *vma, 20 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, 24 #define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e) argument 25 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument 29 #define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e) argument 31 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 33 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 37 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
|
| D | tlb.h | 27 #define tlb_start_vma(tlb, vma) argument 29 #define tlb_start_vma(tlb, vma) \ argument 32 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 36 #define tlb_end_vma(tlb, vma) \ argument 39 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
| D | cacheflush.h | 30 #define flush_icache_page(vma, page) argument 60 #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ argument 66 void flush_cache_range(struct vm_area_struct *vma, 68 void flush_cache_page(struct vm_area_struct *vma, 76 void flush_anon_page(struct vm_area_struct *vma, 108 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 111 if (vma->vm_flags & VM_EXEC) \ 115 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/arch/metag/mm/ |
| D | hugetlbpage.c | 35 struct vm_area_struct *vma; in prepare_hugepage_range() local 44 vma = find_vma(mm, ALIGN_HUGEPT(addr)); in prepare_hugepage_range() 45 if (vma && !(vma->vm_flags & MAP_HUGETLB)) in prepare_hugepage_range() 48 vma = find_vma(mm, addr); in prepare_hugepage_range() 49 if (vma) { in prepare_hugepage_range() 50 if (addr + len > vma->vm_start) in prepare_hugepage_range() 52 if (!(vma->vm_flags & MAP_HUGETLB) && in prepare_hugepage_range() 53 (ALIGN_HUGEPT(addr + len) > vma->vm_start)) in prepare_hugepage_range() 126 struct vm_area_struct *vma; in hugetlb_get_unmapped_area_existing() local 140 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { in hugetlb_get_unmapped_area_existing() [all …]
|
| D | fault.c | 53 struct vm_area_struct *vma, *prev_vma; in do_page_fault() local 116 vma = find_vma_prev(mm, address, &prev_vma); in do_page_fault() 118 if (!vma || address < vma->vm_start) in do_page_fault() 123 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 127 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) in do_page_fault() 136 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault() 173 vma = prev_vma; in do_page_fault() 174 if (vma && (expand_stack(vma, address) == 0)) in do_page_fault()
|
| /linux-4.4.14/include/linux/ |
| D | huge_mm.h | 5 struct vm_area_struct *vma, 10 struct vm_area_struct *vma); 12 struct vm_area_struct *vma, 15 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 18 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 23 struct vm_area_struct *vma, 25 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 28 extern int move_huge_pmd(struct vm_area_struct *vma, 33 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 70 extern bool is_vma_temporary_stack(struct vm_area_struct *vma); [all …]
|
| D | userfaultfd_k.h | 30 extern int handle_userfault(struct vm_area_struct *vma, unsigned long address, 40 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument 43 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; in is_mergeable_vm_userfaultfd_ctx() 46 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument 48 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing() 51 static inline bool userfaultfd_armed(struct vm_area_struct *vma) in userfaultfd_armed() argument 53 return vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP); in userfaultfd_armed() 59 static inline int handle_userfault(struct vm_area_struct *vma, in handle_userfault() argument 67 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument 73 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument [all …]
|
| D | mm.h | 256 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 259 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); 263 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); 266 int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); 271 int (*access)(struct vm_area_struct *vma, unsigned long addr, 277 const char *(*name)(struct vm_area_struct *vma); 287 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 299 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 307 struct page *(*find_special_page)(struct vm_area_struct *vma, 572 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite() argument [all …]
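The mm.h excerpt above lists the vm_operations_struct callbacks with their 4.4-era prototypes (fault and friends still take the VMA explicitly). A minimal sketch of a driver-style fault handler wired into that table; my_dev, my_vm_fault and the backing page array are hypothetical:

```c
#include <linux/mm.h>

struct my_dev {
	struct page **pages;		/* hypothetical backing pages */
	unsigned long npages;
};

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;

	if (vmf->pgoff >= dev->npages)
		return VM_FAULT_SIGBUS;

	/* Install the backing page; the core expects a held reference. */
	get_page(dev->pages[vmf->pgoff]);
	vmf->page = dev->pages[vmf->pgoff];
	return 0;
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_vm_fault,
};
```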
|
| D | mempolicy.h | 93 #define vma_policy(vma) ((vma)->vm_policy) argument 131 struct vm_area_struct *vma, 138 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, 140 bool vma_policy_mof(struct vm_area_struct *vma); 148 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, 175 static inline int vma_migratable(struct vm_area_struct *vma) in vma_migratable() argument 177 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in vma_migratable() 181 if (vma->vm_flags & VM_HUGETLB) in vma_migratable() 190 if (vma->vm_file && in vma_migratable() 191 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) in vma_migratable() [all …]
|
| D | dax.h | 23 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, in dax_pmd_fault() argument 32 #define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod) argument 33 #define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod) argument 35 static inline bool vma_is_dax(struct vm_area_struct *vma) in vma_is_dax() argument 37 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); in vma_is_dax()
|
| D | khugepaged.h | 9 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, 39 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) in khugepaged_enter() 46 if (__khugepaged_enter(vma->vm_mm)) in khugepaged_enter() 58 static inline int khugepaged_enter(struct vm_area_struct *vma, in khugepaged_enter() argument 63 static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
|
| D | rmap.h | 74 struct vm_area_struct *vma; member 141 static inline void anon_vma_merge(struct vm_area_struct *vma, in anon_vma_merge() argument 144 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); in anon_vma_merge() 222 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); 235 int (*rmap_one)(struct page *page, struct vm_area_struct *vma, 239 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); 247 #define anon_vma_prepare(vma) (0) argument 248 #define anon_vma_link(vma) do {} while (0) argument
|
| D | hugetlb.h | 54 void reset_vma_resv_huge_pages(struct vm_area_struct *vma); 71 struct vm_area_struct *vma, 74 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 81 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 84 struct vm_area_struct *vma, 95 struct vm_area_struct *vma, 121 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 126 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument 137 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) argument 152 #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) argument [all …]
|
| /linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
| D | base.c | 30 nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node) in nvkm_vm_map_at() argument 32 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_at() 35 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_at() 36 u32 offset = vma->node->offset + (delta >> 12); in nvkm_vm_map_at() 37 u32 bits = vma->node->type - 12; in nvkm_vm_map_at() 56 mmu->func->map(vma, pgt, node, pte, len, phys, delta); in nvkm_vm_map_at() 66 delta += (u64)len << vma->node->type; in nvkm_vm_map_at() 74 nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length, in nvkm_vm_map_sg_table() argument 77 struct nvkm_vm *vm = vma->vm; in nvkm_vm_map_sg_table() 79 int big = vma->node->type != mmu->func->spg_shift; in nvkm_vm_map_sg_table() [all …]
|
| D | nv50.c | 64 vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) in vm_addr() argument 69 if (vma->access & NV_MEM_ACCESS_SYS) in vm_addr() 71 if (!(vma->access & NV_MEM_ACCESS_WO)) in vm_addr() 77 nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt, in nv50_vm_map() argument 80 struct nvkm_ram *ram = vma->vm->mmu->subdev.device->fb->ram; in nv50_vm_map() 92 phys = vm_addr(vma, phys, mem->memtype, target); in nv50_vm_map() 108 phys += block << (vma->node->type - 3); in nv50_vm_map() 113 delta += block << (vma->node->type - 3); in nv50_vm_map() 127 nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt, in nv50_vm_map_sg() argument 130 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2; in nv50_vm_map_sg() [all …]
|
| D | gf100.c | 89 gf100_vm_addr(struct nvkm_vma *vma, u64 phys, u32 memtype, u32 target) in gf100_vm_addr() argument 94 if (vma->access & NV_MEM_ACCESS_SYS) in gf100_vm_addr() 103 gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt, in gf100_vm_map() argument 106 u64 next = 1 << (vma->node->type - 8); in gf100_vm_map() 108 phys = gf100_vm_addr(vma, phys, mem->memtype, 0); in gf100_vm_map() 112 struct nvkm_ltc *ltc = vma->vm->mmu->subdev.device->ltc; in gf100_vm_map() 130 gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt, in gf100_vm_map_sg() argument 133 u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5; in gf100_vm_map_sg() 140 u64 phys = gf100_vm_addr(vma, *list++, memtype, target); in gf100_vm_map_sg() 149 gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt) in gf100_vm_unmap() argument
|
| /linux-4.4.14/arch/metag/include/asm/ |
| D | tlb.h | 11 #define tlb_start_vma(tlb, vma) \ argument 14 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 17 #define tlb_end_vma(tlb, vma) \ argument 20 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ 26 #define tlb_start_vma(tlb, vma) do { } while (0) argument 27 #define tlb_end_vma(tlb, vma) do { } while (0) argument
|
| D | cacheflush.h | 49 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument 52 flush_cache_mm(vma->vm_mm); in flush_cache_range() 55 static inline void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument 58 flush_cache_mm(vma->vm_mm); in flush_cache_page() 70 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument 93 #define flush_cache_range(vma, start, end) do { } while (0) argument 94 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 97 #define flush_icache_page(vma, pg) do { } while (0) argument 233 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument 242 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
|
| /linux-4.4.14/scripts/coccinelle/api/ |
| D | vma_pages.cocci | 2 /// Use vma_pages function on vma object instead of explicit computation. 5 // Keywords: vma_pages vma 18 struct vm_area_struct *vma; 21 * (vma->vm_end - vma->vm_start) >> PAGE_SHIFT 28 struct vm_area_struct *vma; 31 - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) 32 + vma_pages(vma) 39 struct vm_area_struct *vma; 43 (vma->vm_end@p - vma->vm_start) >> PAGE_SHIFT 47 x << r_org.vma; [all …]
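The Coccinelle rule above rewrites the open-coded size computation into vma_pages(). An illustration of the before/after forms it matches, in a hypothetical helper:

```c
#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned long my_map_pages(struct vm_area_struct *vma)
{
	/* Before: the open-coded expression the rule matches ... */
	unsigned long nr_before = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	/* ... after: the equivalent helper from <linux/mm.h>. */
	unsigned long nr_after = vma_pages(vma);

	WARN_ON(nr_before != nr_after);
	return nr_after;
}
```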
|
| /linux-4.4.14/arch/xtensa/include/asm/ |
| D | tlb.h | 21 # define tlb_start_vma(tlb,vma) do { } while (0) argument 22 # define tlb_end_vma(tlb,vma) do { } while (0) argument 26 # define tlb_start_vma(tlb, vma) \ argument 29 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 32 # define tlb_end_vma(tlb, vma) \ argument 35 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
| D | cacheflush.h | 125 void local_flush_cache_range(struct vm_area_struct *vma, 127 void local_flush_cache_page(struct vm_area_struct *vma, 143 #define flush_cache_page(vma, addr, pfn) do { } while (0) argument 144 #define flush_cache_range(vma, start, end) do { } while (0) argument 156 #define flush_icache_page(vma,page) do { } while (0) argument 170 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 177 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/arch/powerpc/include/asm/ |
| D | tlbflush.h | 37 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 42 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 49 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 54 #define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) argument 57 #define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr) argument 65 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 66 extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr); 67 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 70 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument 73 flush_tlb_page(vma, vmaddr); in local_flush_tlb_page() [all …]
|
| D | cacheflush.h | 22 #define flush_cache_range(vma, start, end) do { } while (0) argument 23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 24 #define flush_icache_page(vma, page) do { } while (0) argument 36 extern void flush_icache_user_range(struct vm_area_struct *vma, 60 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 63 flush_icache_user_range(vma, page, vaddr, len); \ 65 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| D | fb.h | 8 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument 11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT, in fb_pgprotect() 12 vma->vm_end - vma->vm_start, in fb_pgprotect() 13 vma->vm_page_prot); in fb_pgprotect()
|
| D | hugetlb.h | 86 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, 88 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); 131 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument 135 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_clear_flush() 136 flush_tlb_page(vma, addr); in huge_ptep_clear_flush() 149 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument 159 ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 162 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 176 static inline void flush_hugetlb_page(struct vm_area_struct *vma, in flush_hugetlb_page() argument
|
| /linux-4.4.14/fs/ext2/ |
| D | file.c | 42 static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in ext2_dax_fault() argument 44 struct inode *inode = file_inode(vma->vm_file); in ext2_dax_fault() 50 file_update_time(vma->vm_file); in ext2_dax_fault() 54 ret = __dax_fault(vma, vmf, ext2_get_block, NULL); in ext2_dax_fault() 62 static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, in ext2_dax_pmd_fault() argument 65 struct inode *inode = file_inode(vma->vm_file); in ext2_dax_pmd_fault() 71 file_update_time(vma->vm_file); in ext2_dax_pmd_fault() 75 ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL); in ext2_dax_pmd_fault() 83 static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) in ext2_dax_mkwrite() argument 85 struct inode *inode = file_inode(vma->vm_file); in ext2_dax_mkwrite() [all …]
|
| /linux-4.4.14/drivers/misc/mic/scif/ |
| D | scif_mmap.c | 27 struct vm_area_struct *vma; member 79 struct vm_area_struct *vma; in __scif_zap_mmaps() local 85 vma = info->vma; in __scif_zap_mmaps() 86 size = vma->vm_end - vma->vm_start; in __scif_zap_mmaps() 87 zap_vma_ptes(vma, vma->vm_start, size); in __scif_zap_mmaps() 90 __func__, ep, info->vma, size); in __scif_zap_mmaps() 169 static int scif_insert_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_insert_vma() argument 179 info->vma = vma; in scif_insert_vma() 188 static void scif_delete_vma(struct scif_endpt *ep, struct vm_area_struct *vma) in scif_delete_vma() argument 196 if (info->vma == vma) { in scif_delete_vma() [all …]
|
| /linux-4.4.14/drivers/char/ |
| D | mspec.c | 145 mspec_open(struct vm_area_struct *vma) in mspec_open() argument 149 vdata = vma->vm_private_data; in mspec_open() 160 mspec_close(struct vm_area_struct *vma) in mspec_close() argument 166 vdata = vma->vm_private_data; in mspec_close() 200 mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in mspec_fault() argument 205 struct vma_data *vdata = vma->vm_private_data; in mspec_fault() 236 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); in mspec_fault() 255 mspec_mmap(struct file *file, struct vm_area_struct *vma, in mspec_mmap() argument 261 if (vma->vm_pgoff != 0) in mspec_mmap() 264 if ((vma->vm_flags & VM_SHARED) == 0) in mspec_mmap() [all …]
|
| D | uv_mmtimer.c | 43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma); 147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) in uv_mmtimer_mmap() argument 151 if (vma->vm_end - vma->vm_start != PAGE_SIZE) in uv_mmtimer_mmap() 154 if (vma->vm_flags & VM_WRITE) in uv_mmtimer_mmap() 160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in uv_mmtimer_mmap() 166 if (remap_pfn_range(vma, vma->vm_start, uv_mmtimer_addr >> PAGE_SHIFT, in uv_mmtimer_mmap() 167 PAGE_SIZE, vma->vm_page_prot)) { in uv_mmtimer_mmap()
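uv_mmtimer_mmap() above has the canonical shape of a read-only, single-page MMIO mmap handler: validate the VMA size, refuse writable mappings, mark the protection uncached, then remap_pfn_range(). A sketch under those assumptions; my_phys_addr and my_mmap are hypothetical, and the error codes are illustrative:

```c
#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long my_phys_addr;	/* hypothetical MMIO base address */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;			/* exactly one page */

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;			/* read-only mapping */

	/* MMIO must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, my_phys_addr >> PAGE_SHIFT,
			    PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
```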
|
| D | mem.c | 303 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument 305 return vma->vm_flags & VM_MAYSHARE; in private_mapping_ok() 309 static inline int private_mapping_ok(struct vm_area_struct *vma) in private_mapping_ok() argument 321 static int mmap_mem(struct file *file, struct vm_area_struct *vma) in mmap_mem() argument 323 size_t size = vma->vm_end - vma->vm_start; in mmap_mem() 325 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in mmap_mem() 328 if (!private_mapping_ok(vma)) in mmap_mem() 331 if (!range_is_allowed(vma->vm_pgoff, size)) in mmap_mem() 334 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size, in mmap_mem() 335 &vma->vm_page_prot)) in mmap_mem() [all …]
|
| /linux-4.4.14/drivers/media/v4l2-core/ |
| D | videobuf-dma-contig.c | 66 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument 68 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open() 71 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open() 76 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument 78 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close() 83 map, map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close() 164 struct vm_area_struct *vma; in videobuf_dma_contig_user_get() local 176 vma = find_vma(mm, vb->baddr); in videobuf_dma_contig_user_get() 177 if (!vma) in videobuf_dma_contig_user_get() 180 if ((vb->baddr + mem->size) > vma->vm_end) in videobuf_dma_contig_user_get() [all …]
|
| D | videobuf-vmalloc.c | 54 static void videobuf_vm_open(struct vm_area_struct *vma) in videobuf_vm_open() argument 56 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_open() 59 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_open() 64 static void videobuf_vm_close(struct vm_area_struct *vma) in videobuf_vm_close() argument 66 struct videobuf_mapping *map = vma->vm_private_data; in videobuf_vm_close() 71 map->count, vma->vm_start, vma->vm_end); in videobuf_vm_close() 212 rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); in __videobuf_iolock() 234 struct vm_area_struct *vma) in __videobuf_mmap_mapper() argument 250 buf->baddr = vma->vm_start; in __videobuf_mmap_mapper() 256 pages = PAGE_ALIGN(vma->vm_end - vma->vm_start); in __videobuf_mmap_mapper() [all …]
|
| D | videobuf2-memops.c | 90 static void vb2_common_vm_open(struct vm_area_struct *vma) in vb2_common_vm_open() argument 92 struct vb2_vmarea_handler *h = vma->vm_private_data; in vb2_common_vm_open() 95 __func__, h, atomic_read(h->refcount), vma->vm_start, in vb2_common_vm_open() 96 vma->vm_end); in vb2_common_vm_open() 108 static void vb2_common_vm_close(struct vm_area_struct *vma) in vb2_common_vm_close() argument 110 struct vb2_vmarea_handler *h = vma->vm_private_data; in vb2_common_vm_close() 113 __func__, h, atomic_read(h->refcount), vma->vm_start, in vb2_common_vm_close() 114 vma->vm_end); in vb2_common_vm_close()
|
| /linux-4.4.14/arch/alpha/include/asm/ |
| D | cacheflush.h | 10 #define flush_cache_range(vma, start, end) do { } while (0) argument 11 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 50 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument 53 if (vma->vm_flags & VM_EXEC) { in flush_icache_user_range() 54 struct mm_struct *mm = vma->vm_mm; in flush_icache_user_range() 62 extern void flush_icache_user_range(struct vm_area_struct *vma, 67 #define flush_icache_page(vma, page) \ argument 68 flush_icache_user_range((vma), (page), 0, 0) 70 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 72 flush_icache_user_range(vma, page, vaddr, len); \ [all …]
|
| D | tlbflush.h | 39 struct vm_area_struct *vma, in ev4_flush_tlb_current_page() argument 43 if (vma->vm_flags & VM_EXEC) { in ev4_flush_tlb_current_page() 52 struct vm_area_struct *vma, in ev5_flush_tlb_current_page() argument 55 if (vma->vm_flags & VM_EXEC) in ev5_flush_tlb_current_page() 117 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument 119 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page() 122 flush_tlb_current_page(mm, vma, addr); in flush_tlb_page() 130 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in flush_tlb_range() argument 133 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
|
| /linux-4.4.14/arch/parisc/kernel/ |
| D | cache.c | 78 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) in update_mmu_cache() argument 276 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in __flush_cache_page() argument 281 if (vma->vm_flags & VM_EXEC) in __flush_cache_page() 491 struct vm_area_struct *vma; in mm_total_size() local 494 for (vma = mm->mmap; vma; vma = vma->vm_next) in mm_total_size() 495 usize += vma->vm_end - vma->vm_start; in mm_total_size() 516 struct vm_area_struct *vma; in flush_cache_mm() local 527 for (vma = mm->mmap; vma; vma = vma->vm_next) { in flush_cache_mm() 528 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); in flush_cache_mm() 529 if ((vma->vm_flags & VM_EXEC) == 0) in flush_cache_mm() [all …]
|
| D | pci.c | 205 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument 224 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range() 226 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range() 228 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range() 229 vma->vm_end - vma->vm_start, vma->vm_page_prot); in pci_mmap_page_range()
|
| /linux-4.4.14/arch/alpha/kernel/ |
| D | pci-sysfs.c | 18 struct vm_area_struct *vma, in hose_mmap_page_range() argument 28 vma->vm_pgoff += base >> PAGE_SHIFT; in hose_mmap_page_range() 30 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in hose_mmap_page_range() 31 vma->vm_end - vma->vm_start, in hose_mmap_page_range() 32 vma->vm_page_prot); in hose_mmap_page_range() 36 struct vm_area_struct *vma, int sparse) in __pci_mmap_fits() argument 41 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in __pci_mmap_fits() 42 start = vma->vm_pgoff; in __pci_mmap_fits() 65 struct vm_area_struct *vma, int sparse) in pci_mmap_resource() argument 80 if (!__pci_mmap_fits(pdev, i, vma, sparse)) in pci_mmap_resource() [all …]
|
| /linux-4.4.14/arch/sparc/include/asm/ |
| D | cacheflush_64.h | 23 #define flush_cache_range(vma, start, end) \ argument 24 flush_cache_mm((vma)->vm_mm) 25 #define flush_cache_page(vma, page, pfn) \ argument 26 flush_cache_mm((vma)->vm_mm) 50 #define flush_icache_page(vma, pg) do { } while(0) argument 51 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument 57 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 59 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ 61 flush_ptrace_access(vma, page, vaddr, src, len, 0); \ 64 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument [all …]
|
| D | tlb_32.h | 4 #define tlb_start_vma(tlb, vma) \ argument 6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 9 #define tlb_end_vma(tlb, vma) \ argument 11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
| D | cacheflush_32.h | 12 #define flush_cache_range(vma,start,end) \ argument 13 sparc32_cachetlb_ops->cache_range(vma, start, end) 14 #define flush_cache_page(vma,addr,pfn) \ argument 15 sparc32_cachetlb_ops->cache_page(vma, addr) 17 #define flush_icache_page(vma, pg) do { } while (0) argument 19 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument 21 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 23 flush_cache_page(vma, vaddr, page_to_pfn(page));\ 26 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument 28 flush_cache_page(vma, vaddr, page_to_pfn(page));\
|
| D | tlbflush_32.h | 10 #define flush_tlb_range(vma, start, end) \ argument 11 sparc32_cachetlb_ops->tlb_range(vma, start, end) 12 #define flush_tlb_page(vma, addr) \ argument 13 sparc32_cachetlb_ops->tlb_page(vma, addr)
|
| /linux-4.4.14/arch/arm/mm/ |
| D | fault-armv.c | 40 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, in do_adjust_pte() argument 56 flush_cache_page(vma, address, pfn); in do_adjust_pte() 61 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte() 62 flush_tlb_page(vma, address); in do_adjust_pte() 92 static int adjust_pte(struct vm_area_struct *vma, unsigned long address, in adjust_pte() argument 102 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte() 119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte() 123 ret = do_adjust_pte(vma, address, pfn, pte); in adjust_pte() 132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, in make_coherent() argument 135 struct mm_struct *mm = vma->vm_mm; in make_coherent() [all …]
|
| D | flush.c | 82 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in flush_cache_range() argument 85 vivt_flush_cache_range(vma, start, end); in flush_cache_range() 97 if (vma->vm_flags & VM_EXEC) in flush_cache_range() 101 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) in flush_cache_page() argument 104 vivt_flush_cache_page(vma, user_addr, pfn); in flush_cache_page() 113 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) in flush_cache_page() 162 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 166 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access() 168 if (vma->vm_flags & VM_EXEC) in flush_ptrace_access() 188 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument [all …]
|
| /linux-4.4.14/arch/arc/kernel/ |
| D | arc_hostlink.c | 21 static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma) in arc_hl_mmap() argument 23 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in arc_hl_mmap() 25 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in arc_hl_mmap() 26 vma->vm_end - vma->vm_start, in arc_hl_mmap() 27 vma->vm_page_prot)) { in arc_hl_mmap()
|
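The arc_hostlink handler above follows the usual pattern for exposing a fixed physical window through a character device: mark the mapping uncached, then hand the whole range to io_remap_pfn_range(). A minimal sketch of that pattern; the demo_* names, base address and size are made up for illustration and are not taken from the listing:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

#define DEMO_PHYS_BASE 0x10000000UL	/* hypothetical device window */
#define DEMO_PHYS_SIZE 0x00010000UL

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* refuse mappings larger than the device window */
	if (size > DEMO_PHYS_SIZE || vma->vm_pgoff != 0)
		return -EINVAL;

	/* device registers must not be cached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       DEMO_PHYS_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.mmap  = demo_mmap,
};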
| /linux-4.4.14/arch/ia64/include/asm/ |
| D | fb.h | 9 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument 12 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) in fb_pgprotect() 13 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in fb_pgprotect() 15 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in fb_pgprotect()
|
| D | cacheflush.h | 22 #define flush_cache_range(vma, start, end) do { } while (0) argument 23 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 24 #define flush_icache_page(vma,page) do { } while (0) argument 41 #define flush_icache_user_range(vma, page, user_addr, len) \ argument 47 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 49 flush_icache_user_range(vma, page, vaddr, len); \ 51 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| D | tlbflush.h | 69 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end); 75 flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument 78 flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); in flush_tlb_page() 80 if (vma->vm_mm == current->active_mm) in flush_tlb_page() 83 vma->vm_mm->context = 0; in flush_tlb_page()
|
| /linux-4.4.14/drivers/sbus/char/ |
| D | flash.c | 36 flash_mmap(struct file *file, struct vm_area_struct *vma) in flash_mmap() argument 46 if ((vma->vm_flags & VM_READ) && in flash_mmap() 47 (vma->vm_flags & VM_WRITE)) { in flash_mmap() 51 if (vma->vm_flags & VM_READ) { in flash_mmap() 54 } else if (vma->vm_flags & VM_WRITE) { in flash_mmap() 64 if ((vma->vm_pgoff << PAGE_SHIFT) > size) in flash_mmap() 66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT); in flash_mmap() 68 if (vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)) > size) in flash_mmap() 69 size = vma->vm_end - (vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT)); in flash_mmap() 71 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in flash_mmap() [all …]
|
| /linux-4.4.14/arch/parisc/mm/ |
| D | fault.c | 182 struct vm_area_struct *vma) in show_signal_msg() argument 194 if (vma) in show_signal_msg() 196 vma->vm_start, vma->vm_end); in show_signal_msg() 204 struct vm_area_struct *vma, *prev_vma; in do_page_fault() local 228 vma = find_vma_prev(mm, address, &prev_vma); in do_page_fault() 229 if (!vma || address < vma->vm_start) in do_page_fault() 238 if ((vma->vm_flags & acc_type) != acc_type) in do_page_fault() 247 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault() 287 vma = prev_vma; in do_page_fault() 288 if (vma && (expand_stack(vma, address) == 0)) in do_page_fault() [all …]
|
| /linux-4.4.14/arch/avr32/include/asm/ |
| D | tlb.h | 11 #define tlb_start_vma(tlb, vma) \ argument 12 flush_cache_range(vma, vma->vm_start, vma->vm_end) 14 #define tlb_end_vma(tlb, vma) \ argument 15 flush_tlb_range(vma, vma->vm_start, vma->vm_end)
|
| D | cacheflush.h | 91 #define flush_cache_range(vma, start, end) do { } while (0) argument 92 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 103 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page); 121 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 125 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
|
| /linux-4.4.14/kernel/events/ |
| D | uprobes.c | 121 static bool valid_vma(struct vm_area_struct *vma, bool is_register) in valid_vma() argument 128 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma() 131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument 133 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr() 136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument 138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset() 152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument 155 struct mm_struct *mm = vma->vm_mm; in __replace_page() 164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page() 178 page_add_new_anon_rmap(kpage, vma, addr); in __replace_page() [all …]
|
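The offset_to_vaddr()/vaddr_to_offset() pair in uprobes.c above is plain file-mapping arithmetic: vm_pgoff records which page of the backing file sits at vm_start. Restated as a sketch with made-up numbers (the demo_* names are hypothetical):

#include <linux/mm.h>

/*
 * Example: a vma with vm_start = 0x700000001000 and vm_pgoff = 3,
 * i.e. file offset 0x3000 with 4 KiB pages.
 *
 *   vaddr  = vm_start + (offset - (vm_pgoff << PAGE_SHIFT))
 *   offset = (vm_pgoff << PAGE_SHIFT) + (vaddr - vm_start)
 *
 * File offset 0x3fc0 therefore maps to user address 0x700000001fc0.
 */
static unsigned long demo_offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t demo_vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}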
| /linux-4.4.14/arch/parisc/include/asm/ |
| D | tlb.h | 9 #define tlb_start_vma(tlb, vma) \ argument 11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 14 #define tlb_end_vma(tlb, vma) \ argument 16 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
|
| D | cacheflush.h | 82 #define flush_icache_page(vma,page) do { \ argument 92 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 94 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ 99 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument 101 flush_cache_page(vma, vaddr, page_to_pfn(page)); \ 105 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn); 106 void flush_cache_range(struct vm_area_struct *vma, 114 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) in flush_anon_page() argument 117 flush_tlb_page(vma, vmaddr); in flush_anon_page()
|
| /linux-4.4.14/drivers/gpu/drm/udl/ |
| D | udl_gem.c | 61 struct vm_area_struct *vma) in update_vm_cache_attr() argument 67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in update_vm_cache_attr() 69 vma->vm_page_prot = in update_vm_cache_attr() 70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr() 72 vma->vm_page_prot = in update_vm_cache_attr() 73 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); in update_vm_cache_attr() 87 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in udl_drm_gem_mmap() argument 91 ret = drm_gem_mmap(filp, vma); in udl_drm_gem_mmap() 95 vma->vm_flags &= ~VM_PFNMAP; in udl_drm_gem_mmap() 96 vma->vm_flags |= VM_MIXEDMAP; in udl_drm_gem_mmap() [all …]
|
| /linux-4.4.14/arch/arm/include/asm/ |
| D | cacheflush.h | 173 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument 228 vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vivt_flush_cache_range() argument 230 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_range() 234 vma->vm_flags); in vivt_flush_cache_range() 238 vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) in vivt_flush_cache_page() argument 240 struct mm_struct *mm = vma->vm_mm; in vivt_flush_cache_page() 244 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); in vivt_flush_cache_page() 251 #define flush_cache_range(vma,start,end) \ argument 252 vivt_flush_cache_range(vma,start,end) 253 #define flush_cache_page(vma,addr,pfn) \ argument [all …]
|
| D | tlbflush.h | 419 __local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in __local_flush_tlb_page() argument 424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page() 427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in __local_flush_tlb_page() 441 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in local_flush_tlb_page() argument 445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page() 450 __local_flush_tlb_page(vma, uaddr); in local_flush_tlb_page() 458 __flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in __flush_tlb_page() argument 462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page() 467 __local_flush_tlb_page(vma, uaddr); in __flush_tlb_page() 603 #define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma) argument [all …]
|
| D | tlb.h | 72 struct vm_area_struct *vma; member 99 if (tlb->fullmm || !tlb->vma) in tlb_flush() 102 flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); in tlb_flush() 157 tlb->vma = NULL; in tlb_gather_mmu() 195 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument 198 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma() 199 tlb->vma = vma; in tlb_start_vma() 206 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_end_vma() argument
|
| /linux-4.4.14/arch/sh/include/asm/ |
| D | tlbflush.h | 15 extern void local_flush_tlb_range(struct vm_area_struct *vma, 18 extern void local_flush_tlb_page(struct vm_area_struct *vma, 30 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 32 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 40 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument 43 #define flush_tlb_range(vma, start, end) \ argument 44 local_flush_tlb_range(vma, start, end)
|
| D | tlb.h | 74 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_start_vma() argument 77 flush_cache_range(vma, vma->vm_start, vma->vm_end); in tlb_start_vma() 81 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) in tlb_end_vma() argument 84 flush_tlb_range(vma, tlb->start, tlb->end); in tlb_end_vma() 122 static inline void tlb_wire_entry(struct vm_area_struct *vma , in tlb_wire_entry() argument 136 #define tlb_start_vma(tlb, vma) do { } while (0) argument 137 #define tlb_end_vma(tlb, vma) do { } while (0) argument
|
| D | cacheflush.h | 41 extern void flush_cache_page(struct vm_area_struct *vma, 43 extern void flush_cache_range(struct vm_area_struct *vma, 48 extern void flush_icache_page(struct vm_area_struct *vma, 53 struct vm_area_struct *vma; member 60 static inline void flush_anon_page(struct vm_area_struct *vma, in flush_anon_page() argument 81 extern void copy_to_user_page(struct vm_area_struct *vma, 85 extern void copy_from_user_page(struct vm_area_struct *vma,
|
| /linux-4.4.14/arch/ia64/mm/ |
| D | fault.c | 83 struct vm_area_struct *vma, *prev_vma; in ia64_do_page_fault() local 127 vma = find_vma_prev(mm, address, &prev_vma); in ia64_do_page_fault() 128 if (!vma && !prev_vma ) in ia64_do_page_fault() 138 if (( !vma && prev_vma ) || (address < vma->vm_start) ) in ia64_do_page_fault() 151 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) in ia64_do_page_fault() 154 if ((vma->vm_flags & mask) != mask) in ia64_do_page_fault() 162 fault = handle_mm_fault(mm, vma, address, flags); in ia64_do_page_fault() 207 if (!vma) in ia64_do_page_fault() 209 if (!(vma->vm_flags & VM_GROWSDOWN)) in ia64_do_page_fault() 211 if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) in ia64_do_page_fault() [all …]
|
| D | init.c | 106 struct vm_area_struct *vma; in ia64_init_addr_space() local 115 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in ia64_init_addr_space() 116 if (vma) { in ia64_init_addr_space() 117 INIT_LIST_HEAD(&vma->anon_vma_chain); in ia64_init_addr_space() 118 vma->vm_mm = current->mm; in ia64_init_addr_space() 119 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; in ia64_init_addr_space() 120 vma->vm_end = vma->vm_start + PAGE_SIZE; in ia64_init_addr_space() 121 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; in ia64_init_addr_space() 122 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in ia64_init_addr_space() 124 if (insert_vm_struct(current->mm, vma)) { in ia64_init_addr_space() [all …]
|
| /linux-4.4.14/arch/tile/kernel/ |
| D | tlb.c | 53 void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm, in flush_tlb_page_mm() argument 56 unsigned long size = vma_kernel_pagesize(vma); in flush_tlb_page_mm() 57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_page_mm() 62 void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) in flush_tlb_page() argument 64 flush_tlb_page_mm(vma, vma->vm_mm, va); in flush_tlb_page() 68 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 71 unsigned long size = vma_kernel_pagesize(vma); in flush_tlb_range() 72 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_range()
|
| /linux-4.4.14/include/asm-generic/ |
| D | pgtable.h | 27 extern int ptep_set_access_flags(struct vm_area_struct *vma, 34 extern int pmdp_set_access_flags(struct vm_area_struct *vma, 38 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument 49 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, in ptep_test_and_clear_young() argument 58 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); in ptep_test_and_clear_young() 65 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument 74 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); in pmdp_test_and_clear_young() 78 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument 89 int ptep_clear_flush_young(struct vm_area_struct *vma, 95 extern int pmdp_clear_flush_young(struct vm_area_struct *vma, [all …]
|
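ptep_test_and_clear_young() above only clears the accessed bit in the PTE; the *_clear_flush_young variants declared alongside it also flush the TLB entry when the bit was set. The generic fallback is roughly the following (a hedged sketch reconstructed from memory, not copied from the header):

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Sketch of the usual generic fallback for ptep_clear_flush_young(). */
static int demo_ptep_clear_flush_young(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep)
{
	int young = ptep_test_and_clear_young(vma, address, ptep);

	/* only pay for a TLB flush if the accessed bit was actually set */
	if (young)
		flush_tlb_page(vma, address);
	return young;
}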
| D | cacheflush.h | 14 #define flush_cache_range(vma, start, end) do { } while (0) argument 15 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 21 #define flush_icache_page(vma,pg) do { } while (0) argument 22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument 26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 29 flush_icache_user_range(vma, page, vaddr, len); \ 31 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/drivers/staging/rdma/ipath/ |
| D | ipath_mmap.c | 64 static void ipath_vma_open(struct vm_area_struct *vma) in ipath_vma_open() argument 66 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_open() 71 static void ipath_vma_close(struct vm_area_struct *vma) in ipath_vma_close() argument 73 struct ipath_mmap_info *ip = vma->vm_private_data; in ipath_vma_close() 89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in ipath_mmap() argument 92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in ipath_mmap() 93 unsigned long size = vma->vm_end - vma->vm_start; in ipath_mmap() 115 ret = remap_vmalloc_range(vma, ip->obj, 0); in ipath_mmap() 118 vma->vm_ops = &ipath_vm_ops; in ipath_mmap() 119 vma->vm_private_data = ip; in ipath_mmap() [all …]
|
| /linux-4.4.14/drivers/infiniband/hw/qib/ |
| D | qib_mmap.c | 64 static void qib_vma_open(struct vm_area_struct *vma) in qib_vma_open() argument 66 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_open() 71 static void qib_vma_close(struct vm_area_struct *vma) in qib_vma_close() argument 73 struct qib_mmap_info *ip = vma->vm_private_data; in qib_vma_close() 89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in qib_mmap() argument 92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in qib_mmap() 93 unsigned long size = vma->vm_end - vma->vm_start; in qib_mmap() 115 ret = remap_vmalloc_range(vma, ip->obj, 0); in qib_mmap() 118 vma->vm_ops = &qib_vm_ops; in qib_mmap() 119 vma->vm_private_data = ip; in qib_mmap() [all …]
|
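ipath, qib and hfi1 all share the same idiom here: per-context bookkeeping is allocated with vmalloc_user() in the kernel and exported to user space with remap_vmalloc_range(). A stripped-down sketch of that idiom; the demo_* structure and names are hypothetical:

#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_info {
	void *obj;		/* buffer obtained from vmalloc_user() */
	size_t size;
};

static int demo_mmap_info(struct vm_area_struct *vma, struct demo_info *ip)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > ip->size)
		return -EINVAL;

	/*
	 * The buffer must come from vmalloc_user() so it is page-aligned
	 * and zeroed; remap_vmalloc_range() then inserts its pages into
	 * the user mapping.
	 */
	return remap_vmalloc_range(vma, ip->obj, 0);
}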
| D | qib_file_ops.c | 729 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd, in qib_mmap_mem() argument 736 if ((vma->vm_end - vma->vm_start) > len) { in qib_mmap_mem() 739 vma->vm_end - vma->vm_start, len); in qib_mmap_mem() 749 if (vma->vm_flags & VM_WRITE) { in qib_mmap_mem() 757 vma->vm_flags &= ~VM_MAYWRITE; in qib_mmap_mem() 761 ret = remap_pfn_range(vma, vma->vm_start, pfn, in qib_mmap_mem() 762 len, vma->vm_page_prot); in qib_mmap_mem() 771 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, in mmap_ureg() argument 784 if ((vma->vm_end - vma->vm_start) > sz) { in mmap_ureg() 787 vma->vm_end - vma->vm_start); in mmap_ureg() [all …]
|
| /linux-4.4.14/drivers/staging/rdma/hfi1/ |
| D | mmap.c | 82 static void hfi1_vma_open(struct vm_area_struct *vma) in hfi1_vma_open() argument 84 struct hfi1_mmap_info *ip = vma->vm_private_data; in hfi1_vma_open() 89 static void hfi1_vma_close(struct vm_area_struct *vma) in hfi1_vma_close() argument 91 struct hfi1_mmap_info *ip = vma->vm_private_data; in hfi1_vma_close() 107 int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) in hfi1_mmap() argument 110 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in hfi1_mmap() 111 unsigned long size = vma->vm_end - vma->vm_start; in hfi1_mmap() 133 ret = remap_vmalloc_range(vma, ip->obj, 0); in hfi1_mmap() 136 vma->vm_ops = &hfi1_vm_ops; in hfi1_mmap() 137 vma->vm_private_data = ip; in hfi1_mmap() [all …]
|
| /linux-4.4.14/arch/ia64/pci/ |
| D | pci.c | 422 pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument 425 unsigned long size = vma->vm_end - vma->vm_start; in pci_mmap_page_range() 441 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) in pci_mmap_page_range() 444 prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, in pci_mmap_page_range() 445 vma->vm_page_prot); in pci_mmap_page_range() 455 efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) in pci_mmap_page_range() 456 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_page_range() 458 vma->vm_page_prot = prot; in pci_mmap_page_range() 460 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range() 461 vma->vm_end - vma->vm_start, vma->vm_page_prot)) in pci_mmap_page_range() [all …]
|
| /linux-4.4.14/arch/score/include/asm/ |
| D | cacheflush.h | 9 extern void flush_cache_range(struct vm_area_struct *vma, 11 extern void flush_cache_page(struct vm_area_struct *vma, 28 static inline void flush_icache_page(struct vm_area_struct *vma, in flush_icache_page() argument 31 if (vma->vm_flags & VM_EXEC) { in flush_icache_page() 38 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument 41 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 44 if ((vma->vm_flags & VM_EXEC)) \ 45 flush_cache_page(vma, vaddr, page_to_pfn(page));\
|
| D | tlbflush.h | 17 extern void local_flush_tlb_range(struct vm_area_struct *vma, 21 extern void local_flush_tlb_page(struct vm_area_struct *vma, 27 #define flush_tlb_range(vma, vmaddr, end) \ argument 28 local_flush_tlb_range(vma, vmaddr, end) 31 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
|
| /linux-4.4.14/arch/sh/mm/ |
| D | cache-sh5.c | 87 static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr) in sh64_icache_inv_user_page() argument 113 vma_asid = cpu_asid(cpu, vma->vm_mm); in sh64_icache_inv_user_page() 177 struct vm_area_struct *vma; in sh64_icache_inv_user_page_range() local 179 vma = find_vma(mm, aligned_start); in sh64_icache_inv_user_page_range() 180 if (!vma || (aligned_start <= vma->vm_end)) { in sh64_icache_inv_user_page_range() 185 vma_end = vma->vm_end; in sh64_icache_inv_user_page_range() 186 if (vma->vm_flags & VM_EXEC) { in sh64_icache_inv_user_page_range() 190 sh64_icache_inv_user_page(vma, eaddr); in sh64_icache_inv_user_page_range() 194 aligned_start = vma->vm_end; /* Skip to start of next region */ in sh64_icache_inv_user_page_range() 528 struct vm_area_struct *vma; in sh5_flush_cache_range() local [all …]
|
| D | cache.c | 58 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 73 if (vma->vm_flags & VM_EXEC) in copy_to_user_page() 74 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page() 77 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument 94 unsigned long vaddr, struct vm_area_struct *vma) in copy_user_highpage() argument 112 (vma->vm_flags & VM_EXEC)) in copy_user_highpage() 134 void __update_cache(struct vm_area_struct *vma, in __update_cache() argument 191 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, in flush_cache_page() argument 196 data.vma = vma; in flush_cache_page() 203 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument [all …]
|
| D | mmap.c | 37 struct vm_area_struct *vma; in arch_get_unmapped_area() local 64 vma = find_vma(mm, addr); in arch_get_unmapped_area() 66 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area() 84 struct vm_area_struct *vma; in arch_get_unmapped_area_topdown() local 114 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown() 116 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area_topdown()
|
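The arch/sh/mm/mmap.c fragment above shows the standard "is this range a hole" test used by arch_get_unmapped_area(): find_vma() returns the lowest vma with vm_end above the address, so a hint is usable only if no such vma exists or the request ends before it starts. The check in isolation, as a hedged sketch:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Returns true if [addr, addr + len) does not overlap any existing
 * mapping of @mm.  Caller must hold mm->mmap_sem (4.4-era locking).
 */
static bool demo_range_is_free(struct mm_struct *mm,
			       unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return !vma || addr + len <= vma->vm_start;
}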
| D | cache-sh4.c | 207 struct vm_area_struct *vma; in sh4_flush_cache_page() local 217 vma = data->vma; in sh4_flush_cache_page() 223 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_page() 226 pgd = pgd_offset(vma->vm_mm, address); in sh4_flush_cache_page() 235 if ((vma->vm_mm == current->active_mm)) in sh4_flush_cache_page() 256 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_page() 279 struct vm_area_struct *vma; in sh4_flush_cache_range() local 282 vma = data->vma; in sh4_flush_cache_range() 286 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) in sh4_flush_cache_range() 298 if (vma->vm_flags & VM_EXEC) in sh4_flush_cache_range()
|
| D | tlbflush_32.c | 15 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument 19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page() 24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page() 28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page() 39 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument 42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
|
| /linux-4.4.14/arch/tile/include/asm/ |
| D | tlbflush.h | 45 static inline void local_flush_tlb_page(struct vm_area_struct *vma, in local_flush_tlb_page() argument 53 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) in local_flush_tlb_page() 58 static inline void local_flush_tlb_pages(struct vm_area_struct *vma, in local_flush_tlb_pages() argument 67 if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) in local_flush_tlb_pages()
|
| D | cacheflush.h | 29 #define flush_cache_range(vma, start, end) do { } while (0) argument 30 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 37 #define flush_icache_page(vma, pg) do { } while (0) argument 38 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) argument 64 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument 69 if (vma->vm_flags & VM_EXEC) { in copy_to_user_page() 75 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| D | hugetlb.h | 63 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, in huge_ptep_clear_flush() argument 66 ptep_clear_flush(vma, addr, ptep); in huge_ptep_clear_flush() 85 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, in huge_ptep_set_access_flags() argument 89 return ptep_set_access_flags(vma, addr, ptep, pte, dirty); in huge_ptep_set_access_flags() 102 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, in arch_make_huge_pte() argument 105 size_t pagesize = huge_page_size(hstate_vma(vma)); in arch_make_huge_pte()
|
| /linux-4.4.14/arch/mips/include/asm/ |
| D | tlbflush.h | 17 extern void local_flush_tlb_range(struct vm_area_struct *vma, 21 extern void local_flush_tlb_page(struct vm_area_struct *vma, 29 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, 39 #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) argument 42 #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) argument
|
| D | tlb.h | 8 #define tlb_start_vma(tlb, vma) \ argument 11 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 13 #define tlb_end_vma(tlb, vma) do { } while (0) argument
|
| /linux-4.4.14/drivers/gpu/drm/amd/amdkfd/ |
| D | kfd_doorbell.c | 120 int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) in kfd_doorbell_mmap() argument 129 if (vma->vm_end - vma->vm_start != doorbell_process_allocation()) in kfd_doorbell_mmap() 133 dev = kfd_device_by_id(vma->vm_pgoff); in kfd_doorbell_mmap() 140 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | in kfd_doorbell_mmap() 143 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in kfd_doorbell_mmap() 147 (unsigned long long) vma->vm_start); in kfd_doorbell_mmap() 149 pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags); in kfd_doorbell_mmap() 153 return io_remap_pfn_range(vma, in kfd_doorbell_mmap() 154 vma->vm_start, in kfd_doorbell_mmap() 157 vma->vm_page_prot); in kfd_doorbell_mmap()
|
| /linux-4.4.14/arch/m32r/mm/ |
| D | fault.c | 79 struct vm_area_struct * vma; in do_page_fault() local 145 vma = find_vma(mm, address); in do_page_fault() 146 if (!vma) in do_page_fault() 148 if (vma->vm_start <= address) in do_page_fault() 150 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 164 if (expand_stack(vma, address)) in do_page_fault() 176 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 182 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault() 189 if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC)) in do_page_fault() 199 fault = handle_mm_fault(mm, vma, addr, flags); in do_page_fault() [all …]
|
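The m32r handler above, like the other per-arch do_page_fault() implementations in this listing (alpha, avr32, hexagon, nios2, openrisc, sparc, microblaze, ...), follows one skeleton: look up the vma, optionally grow the stack, check the access against vm_flags, then call handle_mm_fault(). A condensed, hedged sketch of that flow using the 4.4-era handle_mm_fault() signature; retry and OOM handling are omitted:

#include <linux/mm.h>
#include <linux/sched.h>

static int demo_handle_user_fault(struct mm_struct *mm, unsigned long address,
				  bool is_write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start > address) {
		/* address below the vma is only valid for a growing stack */
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* 4.4 signature: mm is still passed explicitly */
	fault = handle_mm_fault(mm, vma, address, flags);

	up_read(&mm->mmap_sem);
	return fault;

bad_area:
	up_read(&mm->mmap_sem);
	return VM_FAULT_SIGSEGV;
}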
| /linux-4.4.14/arch/arm64/mm/ |
| D | flush.c | 30 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument 33 if (vma->vm_flags & VM_EXEC) in flush_cache_range() 37 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 41 if (vma->vm_flags & VM_EXEC) { in flush_ptrace_access() 59 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 65 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page() 105 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, in pmdp_splitting_flush() argument 111 set_pmd_at(vma->vm_mm, address, pmdp, pmd); in pmdp_splitting_flush()
|
| /linux-4.4.14/arch/cris/arch-v32/drivers/pci/ |
| D | bios.c | 17 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument 25 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range() 26 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range() 31 if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range() 32 vma->vm_end - vma->vm_start, in pci_mmap_page_range() 33 vma->vm_page_prot)) in pci_mmap_page_range()
|
| /linux-4.4.14/arch/hexagon/include/asm/ |
| D | cacheflush.h | 44 #define flush_cache_range(vma, start, end) do { } while (0) argument 45 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 50 #define flush_icache_page(vma, pg) do { } while (0) argument 51 #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) argument 86 static inline void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument 92 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 95 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/arch/nios2/kernel/ |
| D | sys_nios2.c | 24 struct vm_area_struct *vma; in sys_cacheflush() local 41 vma = find_vma(current->mm, addr); in sys_cacheflush() 42 if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) in sys_cacheflush() 45 flush_cache_range(vma, addr, addr + len); in sys_cacheflush()
|
| /linux-4.4.14/arch/m68k/include/asm/ |
| D | fb.h | 11 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument 14 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE; in fb_pgprotect() 17 static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, in fb_pgprotect() argument 21 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030; in fb_pgprotect() 23 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040; in fb_pgprotect() 25 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S; in fb_pgprotect()
|
| D | cacheflush_mm.h | 205 static inline void flush_cache_range(struct vm_area_struct *vma, in flush_cache_range() argument 209 if (vma->vm_mm == current->mm) in flush_cache_range() 213 static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long… in flush_cache_page() argument 215 if (vma->vm_mm == current->mm) in flush_cache_page() 254 #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page)) argument 256 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 260 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument 264 flush_cache_page(vma, vaddr, page_to_pfn(page)); in copy_to_user_page() 266 flush_icache_user_range(vma, page, vaddr, len); in copy_to_user_page() 268 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument [all …]
|
| D | cacheflush_no.h | 13 #define flush_cache_range(vma, start, end) do { } while (0) argument 14 #define flush_cache_page(vma, vmaddr) do { } while (0) argument 21 #define flush_icache_page(vma,pg) do { } while (0) argument 22 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument 26 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 28 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| D | tlbflush.h | 84 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument 86 if (vma->vm_mm == current->active_mm) { in flush_tlb_page() 94 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 97 if (vma->vm_mm == current->active_mm) in flush_tlb_range() 170 static inline void flush_tlb_page (struct vm_area_struct *vma, in flush_tlb_page() argument 177 sun3_put_context(vma->vm_mm->context); in flush_tlb_page() 191 static inline void flush_tlb_range (struct vm_area_struct *vma, in flush_tlb_range() argument 194 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 260 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument
|
| /linux-4.4.14/arch/frv/include/asm/ |
| D | cacheflush.h | 25 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0) argument 77 extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 80 static inline void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, in flush_icache_user_range() argument 87 static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument 89 flush_icache_user_range(vma, page, page_to_phys(page), PAGE_SIZE); in flush_icache_page() 96 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 99 flush_icache_user_range((vma), (page), (vaddr), (len)); \ 102 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| D | tlbflush.h | 42 #define flush_tlb_range(vma,start,end) \ argument 45 __flush_tlb_range((vma)->vm_mm->context.id, start, end); \ 49 #define flush_tlb_page(vma,addr) \ argument 52 __flush_tlb_page((vma)->vm_mm->context.id, addr); \ 66 #define flush_tlb_page(vma,addr) BUG() argument
|
| /linux-4.4.14/arch/hexagon/mm/ |
| D | vm_fault.c | 50 struct vm_area_struct *vma; in do_page_fault() local 71 vma = find_vma(mm, address); in do_page_fault() 72 if (!vma) in do_page_fault() 75 if (vma->vm_start <= address) in do_page_fault() 78 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 81 if (expand_stack(vma, address)) in do_page_fault() 90 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 94 if (!(vma->vm_flags & VM_READ)) in do_page_fault() 98 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 104 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/nios2/mm/ |
| D | cacheflush.c | 132 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument 136 if (vma == NULL || (vma->vm_flags & VM_EXEC)) in flush_cache_range() 140 void flush_icache_page(struct vm_area_struct *vma, struct page *page) in flush_icache_page() argument 149 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, in flush_cache_page() argument 156 if (vma->vm_flags & VM_EXEC) in flush_cache_page() 200 void update_mmu_cache(struct vm_area_struct *vma, in update_mmu_cache() argument 225 if (vma->vm_flags & VM_EXEC) in update_mmu_cache() 226 flush_icache_page(vma, page); in update_mmu_cache() 249 void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument 253 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); in copy_from_user_page() [all …]
|
| D | fault.c | 45 struct vm_area_struct *vma = NULL; in do_page_fault() local 93 vma = find_vma(mm, address); in do_page_fault() 94 if (!vma) in do_page_fault() 96 if (vma->vm_start <= address) in do_page_fault() 98 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 100 if (expand_stack(vma, address)) in do_page_fault() 115 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 119 if (!(vma->vm_flags & VM_READ)) in do_page_fault() 123 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 134 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/ipc/ |
| D | shm.c | 68 static void shm_open(struct vm_area_struct *vma); 69 static void shm_close(struct vm_area_struct *vma); 190 static int __shm_open(struct vm_area_struct *vma) in __shm_open() argument 192 struct file *file = vma->vm_file; in __shm_open() 209 static void shm_open(struct vm_area_struct *vma) in shm_open() argument 211 int err = __shm_open(vma); in shm_open() 269 static void shm_close(struct vm_area_struct *vma) in shm_close() argument 271 struct file *file = vma->vm_file; in shm_close() 376 static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in shm_fault() argument 378 struct file *file = vma->vm_file; in shm_fault() [all …]
|
| /linux-4.4.14/arch/sparc/mm/ |
| D | fault_32.c | 170 struct vm_area_struct *vma; in do_sparc_fault() local 210 vma = find_vma(mm, address); in do_sparc_fault() 211 if (!vma) in do_sparc_fault() 213 if (vma->vm_start <= address) in do_sparc_fault() 215 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_sparc_fault() 217 if (expand_stack(vma, address)) in do_sparc_fault() 226 if (!(vma->vm_flags & VM_WRITE)) in do_sparc_fault() 230 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_sparc_fault() 244 fault = handle_mm_fault(mm, vma, address, flags); in do_sparc_fault() 386 struct vm_area_struct *vma; in force_user_fault() local [all …]
|
| D | srmmu.c | 545 extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned lon… 546 extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); 552 extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long … 553 extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 559 extern void swift_flush_cache_range(struct vm_area_struct *vma, 561 extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); 567 extern void swift_flush_tlb_range(struct vm_area_struct *vma, 569 extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); 572 void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 577 if ((ctx1 = vma->vm_mm->context) != -1) { [all …]
|
| /linux-4.4.14/arch/unicore32/mm/ |
| D | flush.c | 23 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, in flush_cache_range() argument 26 if (vma->vm_flags & VM_EXEC) in flush_cache_range() 30 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, in flush_cache_page() argument 35 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, in flush_ptrace_access() argument 39 if (vma->vm_flags & VM_EXEC) { in flush_ptrace_access() 53 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 58 flush_ptrace_access(vma, page, uaddr, dst, len); in copy_to_user_page()
|
| /linux-4.4.14/arch/arm/kernel/ |
| D | smp_tlb.c | 156 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in flush_tlb_page() argument 160 ta.ta_vma = vma; in flush_tlb_page() 162 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page() 165 __flush_tlb_page(vma, uaddr); in flush_tlb_page() 166 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page() 180 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 185 ta.ta_vma = vma; in flush_tlb_range() 188 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range() 191 local_flush_tlb_range(vma, start, end); in flush_tlb_range() 192 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
|
| /linux-4.4.14/drivers/xen/xenfs/ |
| D | xenstored.c | 33 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) in xsd_kva_mmap() argument 35 size_t size = vma->vm_end - vma->vm_start; in xsd_kva_mmap() 37 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) in xsd_kva_mmap() 40 if (remap_pfn_range(vma, vma->vm_start, in xsd_kva_mmap() 42 size, vma->vm_page_prot)) in xsd_kva_mmap()
|
| /linux-4.4.14/arch/arc/mm/ |
| D | tlb.c | 323 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument 337 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range() 350 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range() 352 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range() 397 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument 407 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page() 408 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page() 464 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in flush_tlb_page() argument 467 .ta_vma = vma, in flush_tlb_page() 471 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page() [all …]
|
| D | fault.c | 65 struct vm_area_struct *vma = NULL; in do_page_fault() local 103 vma = find_vma(mm, address); in do_page_fault() 104 if (!vma) in do_page_fault() 106 if (vma->vm_start <= address) in do_page_fault() 108 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 110 if (expand_stack(vma, address)) in do_page_fault() 127 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 131 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault() 140 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/powerpc/mm/ |
| D | copro_fault.c | 39 struct vm_area_struct *vma; in copro_handle_mm_fault() local 51 vma = find_vma(mm, ea); in copro_handle_mm_fault() 52 if (!vma) in copro_handle_mm_fault() 55 if (ea < vma->vm_start) { in copro_handle_mm_fault() 56 if (!(vma->vm_flags & VM_GROWSDOWN)) in copro_handle_mm_fault() 58 if (expand_stack(vma, ea)) in copro_handle_mm_fault() 64 if (!(vma->vm_flags & VM_WRITE)) in copro_handle_mm_fault() 67 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in copro_handle_mm_fault() 78 *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); in copro_handle_mm_fault()
|
| D | subpage-prot.c | 137 struct vm_area_struct *vma = walk->vma; in subpage_walk_pmd_entry() local 138 split_huge_page_pmd(vma, addr, pmd); in subpage_walk_pmd_entry() 145 struct vm_area_struct *vma; in subpage_mark_vma_nohuge() local 155 vma = find_vma(mm, addr); in subpage_mark_vma_nohuge() 159 if (vma && ((addr + len) <= vma->vm_start)) in subpage_mark_vma_nohuge() 162 while (vma) { in subpage_mark_vma_nohuge() 163 if (vma->vm_start >= (addr + len)) in subpage_mark_vma_nohuge() 165 vma->vm_flags |= VM_NOHUGEPAGE; in subpage_mark_vma_nohuge() 166 walk_page_vma(vma, &subpage_proto_walk); in subpage_mark_vma_nohuge() 167 vma = vma->vm_next; in subpage_mark_vma_nohuge()
|
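subpage_mark_vma_nohuge() above walks every vma intersecting [addr, addr + len) by following vm_next. The traversal pattern on its own, as a hedged sketch (4.4 still links vmas with vm_next; the demo_* name is hypothetical):

#include <linux/mm.h>

/*
 * Set @vm_flag on every vma overlapping [addr, addr + len).
 * Caller must hold mm->mmap_sem for writing.
 */
static void demo_flag_range(struct mm_struct *mm, unsigned long addr,
			    unsigned long len, unsigned long vm_flag)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* find_vma() returns the first vma with vm_end > addr */
	while (vma && vma->vm_start < addr + len) {
		vma->vm_flags |= vm_flag;
		vma = vma->vm_next;
	}
}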
| D | pgtable.c | 88 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, in set_access_flags_filter() argument 128 static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, in set_access_flags_filter() argument 146 if (WARN_ON(!(vma->vm_flags & VM_EXEC))) in set_access_flags_filter() 200 int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, in ptep_set_access_flags() argument 204 entry = set_access_flags_filter(entry, vma, dirty); in ptep_set_access_flags() 207 if (!is_vm_hugetlb_page(vma)) in ptep_set_access_flags() 208 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags() 210 flush_tlb_page_nohash(vma, address); in ptep_set_access_flags()
|
| D | fault.c | 212 struct vm_area_struct * vma; in do_page_fault() local 333 vma = find_vma(mm, address); in do_page_fault() 334 if (!vma) in do_page_fault() 336 if (vma->vm_start <= address) in do_page_fault() 338 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 350 if (address + 0x100000 < vma->vm_end) { in do_page_fault() 371 if (expand_stack(vma, address)) in do_page_fault() 403 if (!(vma->vm_flags & VM_EXEC) && in do_page_fault() 405 !(vma->vm_flags & (VM_READ | VM_WRITE)))) in do_page_fault() 417 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() [all …]
|
| /linux-4.4.14/arch/xtensa/kernel/ |
| D | pci.c | 286 __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma, in __pci_mmap_make_offset() argument 290 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in __pci_mmap_make_offset() 329 vma->vm_pgoff = offset >> PAGE_SHIFT; in __pci_mmap_make_offset() 341 __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, in __pci_mmap_set_pgprot() argument 344 int prot = pgprot_val(vma->vm_page_prot); in __pci_mmap_set_pgprot() 352 vma->vm_page_prot = __pgprot(prot); in __pci_mmap_set_pgprot() 365 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument 371 ret = __pci_mmap_make_offset(dev, vma, mmap_state); in pci_mmap_page_range() 375 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); in pci_mmap_page_range() 377 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range() [all …]
|
| D | smp.c | 442 struct vm_area_struct *vma; member 470 local_flush_tlb_page(fd->vma, fd->addr1); in ipi_flush_tlb_page() 473 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) in flush_tlb_page() argument 476 .vma = vma, in flush_tlb_page() 485 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in ipi_flush_tlb_range() 488 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 492 .vma = vma, in flush_tlb_range() 529 local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); in ipi_flush_cache_page() 532 void flush_cache_page(struct vm_area_struct *vma, in flush_cache_page() argument 536 .vma = vma, in flush_cache_page() [all …]
|
| /linux-4.4.14/arch/mn10300/unit-asb2305/ |
| D | pci-asb2305.c | 214 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, in pci_mmap_page_range() argument 222 vma->vm_flags |= VM_LOCKED; in pci_mmap_page_range() 224 prot = pgprot_val(vma->vm_page_prot); in pci_mmap_page_range() 226 vma->vm_page_prot = __pgprot(prot); in pci_mmap_page_range() 229 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_page_range() 230 vma->vm_end - vma->vm_start, in pci_mmap_page_range() 231 vma->vm_page_prot)) in pci_mmap_page_range()
|
| /linux-4.4.14/arch/m68k/mm/ |
| D | fault.c | 73 struct vm_area_struct * vma; in do_page_fault() local 92 vma = find_vma(mm, address); in do_page_fault() 93 if (!vma) in do_page_fault() 95 if (vma->vm_flags & VM_IO) in do_page_fault() 97 if (vma->vm_start <= address) in do_page_fault() 99 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 109 if (expand_stack(vma, address)) in do_page_fault() 122 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 129 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) in do_page_fault() 139 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/drivers/misc/sgi-gru/ |
| D | grufile.c | 73 static void gru_vma_close(struct vm_area_struct *vma) in gru_vma_close() argument 79 if (!vma->vm_private_data) in gru_vma_close() 82 vdata = vma->vm_private_data; in gru_vma_close() 83 vma->vm_private_data = NULL; in gru_vma_close() 84 gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, in gru_vma_close() 107 static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) in gru_file_mmap() argument 109 if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) in gru_file_mmap() 112 if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || in gru_file_mmap() 113 vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) in gru_file_mmap() 116 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED | in gru_file_mmap() [all …]
|
| D | grufault.c | 63 struct vm_area_struct *vma; in gru_find_vma() local 65 vma = find_vma(current->mm, vaddr); in gru_find_vma() 66 if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops) in gru_find_vma() 67 return vma; in gru_find_vma() 82 struct vm_area_struct *vma; in gru_find_lock_gts() local 86 vma = gru_find_vma(vaddr); in gru_find_lock_gts() 87 if (vma) in gru_find_lock_gts() 88 gts = gru_find_thread_state(vma, TSID(vaddr, vma)); in gru_find_lock_gts() 99 struct vm_area_struct *vma; in gru_alloc_locked_gts() local 103 vma = gru_find_vma(vaddr); in gru_alloc_locked_gts() [all …]
|
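gru_find_vma() above shows a common trick for drivers that need to map a user address back to their own state: look the vma up with find_vma() and then check that vma->vm_ops is the driver's ops table, which proves the vma was created by that driver's mmap handler. A hedged sketch with hypothetical names:

#include <linux/mm.h>
#include <linux/sched.h>

/* filled in by the real driver's mmap handler */
static const struct vm_operations_struct demo_vm_ops = { };

/* Return the vma for @vaddr only if it belongs to this driver. */
static struct vm_area_struct *demo_find_own_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &demo_vm_ops)
		return vma;
	return NULL;
}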
| /linux-4.4.14/arch/microblaze/kernel/ |
| D | dma.c | 158 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap_coherent() argument 163 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in dma_direct_mmap_coherent() 165 unsigned long off = vma->vm_pgoff; in dma_direct_mmap_coherent() 172 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in dma_direct_mmap_coherent() 177 return remap_pfn_range(vma, vma->vm_start, pfn + off, in dma_direct_mmap_coherent() 178 vma->vm_end - vma->vm_start, vma->vm_page_prot); in dma_direct_mmap_coherent()
|
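dma_direct_mmap_coherent() above open-codes what the generic dma_mmap_coherent() helper usually does: turn the coherent buffer into a pfn and remap it with an appropriate pgprot. In a driver the same result is normally obtained by calling the helper directly; a hedged sketch, with a hypothetical buffer structure:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct demo_buf {
	void *cpu_addr;		/* from dma_alloc_coherent() */
	dma_addr_t dma_addr;
	size_t size;
};

static int demo_mmap_coherent(struct device *dev, struct demo_buf *buf,
			      struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start > buf->size)
		return -EINVAL;

	/* lets the architecture pick the pgprot and remap the pages */
	return dma_mmap_coherent(dev, vma, buf->cpu_addr,
				 buf->dma_addr, buf->size);
}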
| /linux-4.4.14/arch/cris/include/asm/ |
| D | cacheflush.h | 13 #define flush_cache_range(vma, start, end) do { } while (0) argument 14 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) argument 20 #define flush_icache_page(vma,pg) do { } while (0) argument 21 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) argument 25 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 27 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/arch/tile/mm/ |
| D | elf.c | 45 struct vm_area_struct *vma; in notify_exec() local 64 for (vma = current->mm->mmap; ; vma = vma->vm_next) { in notify_exec() 65 if (vma == NULL) { in notify_exec() 69 if (vma->vm_file == exe_file) in notify_exec() 78 if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) { in notify_exec() 82 snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start); in notify_exec()
|
| /linux-4.4.14/arch/mips/kernel/ |
| D | smp.c | 342 struct vm_area_struct *vma; member 351 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi() 354 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) in flush_tlb_range() argument 356 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 361 .vma = vma, in flush_tlb_range() 375 local_flush_tlb_range(vma, start, end); in flush_tlb_range() 400 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi() 403 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in flush_tlb_page() argument 406 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { in flush_tlb_page() 408 .vma = vma, in flush_tlb_page() [all …]
|
| D | vdso.c | 103 struct vm_area_struct *vma; in arch_setup_additional_pages() local 130 vma = _install_special_mapping(mm, base, vvar_size, in arch_setup_additional_pages() 133 if (IS_ERR(vma)) { in arch_setup_additional_pages() 134 ret = PTR_ERR(vma); in arch_setup_additional_pages() 144 ret = io_remap_pfn_range(vma, base, in arch_setup_additional_pages() 153 ret = remap_pfn_range(vma, data_addr, in arch_setup_additional_pages() 160 vma = _install_special_mapping(mm, vdso_addr, image->size, in arch_setup_additional_pages() 164 if (IS_ERR(vma)) { in arch_setup_additional_pages() 165 ret = PTR_ERR(vma); in arch_setup_additional_pages()
|
| /linux-4.4.14/drivers/misc/cxl/ |
| D | context.c | 103 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in cxl_mmap_fault() argument 105 struct cxl_context *ctx = vma->vm_file->private_data; in cxl_mmap_fault() 138 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); in cxl_mmap_fault() 144 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); in cxl_mmap_fault() 158 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) in cxl_context_iomap() argument 160 u64 start = vma->vm_pgoff << PAGE_SHIFT; in cxl_context_iomap() 161 u64 len = vma->vm_end - vma->vm_start; in cxl_context_iomap() 186 vma->vm_flags |= VM_IO | VM_PFNMAP; in cxl_context_iomap() 187 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in cxl_context_iomap() 188 vma->vm_ops = &cxl_mmap_vmops; in cxl_context_iomap()
|
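cxl_mmap_fault() demonstrates the alternative to remapping everything at mmap time: set VM_IO | VM_PFNMAP when the mapping is created and insert individual pfns from the .fault handler with vm_insert_pfn(). A hedged sketch of that structure for the 4.4 fault-handler signature; the MMIO base and demo_* names are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

#define DEMO_MMIO_BASE 0xfe000000UL	/* hypothetical MMIO region */

static int demo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* offset of the faulting page within the mapping */
	unsigned long address = (unsigned long)vmf->virtual_address;
	unsigned long pfn = (DEMO_MMIO_BASE + (address - vma->vm_start))
				>> PAGE_SHIFT;

	vm_insert_pfn(vma, address, pfn);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_vm_fault,
};

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &demo_vm_ops;
	return 0;
}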
| /linux-4.4.14/arch/sh/kernel/ |
| D | smp.c | 376 struct vm_area_struct *vma; member 385 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi() 388 void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 391 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range() 397 fd.vma = vma; in flush_tlb_range() 407 local_flush_tlb_range(vma, start, end); in flush_tlb_range() 431 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi() 434 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in flush_tlb_page() argument 437 if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page() 438 (current->mm != vma->vm_mm)) { in flush_tlb_page() [all …]
|
| /linux-4.4.14/arch/nios2/include/asm/ |
| D | tlb.h | 22 #define tlb_start_vma(tlb, vma) \ argument 25 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 28 #define tlb_end_vma(tlb, vma) do { } while (0) argument
|
| D | cacheflush.h | 26 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, 28 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 34 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page); 39 extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 42 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
|
| /linux-4.4.14/arch/powerpc/oprofile/cell/ |
| D | vma_map.c | 37 vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma, in vma_map_lookup() argument 47 u32 offset = 0x10000000 + vma; in vma_map_lookup() 51 if (vma < map->vma || vma >= map->vma + map->size) in vma_map_lookup() 60 offset = vma - map->vma + map->offset; in vma_map_lookup() 68 vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma, in vma_map_add() argument 82 new->vma = vma; in vma_map_add() 272 map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset, in create_vma_map()
|
| /linux-4.4.14/drivers/gpu/drm/rockchip/ |
| D | rockchip_drm_gem.c | 58 struct vm_area_struct *vma) in rockchip_drm_gem_object_mmap() argument 69 vma->vm_flags &= ~VM_PFNMAP; in rockchip_drm_gem_object_mmap() 70 vma->vm_pgoff = 0; in rockchip_drm_gem_object_mmap() 72 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, in rockchip_drm_gem_object_mmap() 75 drm_gem_vm_close(vma); in rockchip_drm_gem_object_mmap() 81 struct vm_area_struct *vma) in rockchip_gem_mmap_buf() argument 85 ret = drm_gem_mmap_obj(obj, obj->size, vma); in rockchip_gem_mmap_buf() 89 return rockchip_drm_gem_object_mmap(obj, vma); in rockchip_gem_mmap_buf() 93 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) in rockchip_gem_mmap() argument 98 ret = drm_gem_mmap(filp, vma); in rockchip_gem_mmap() [all …]
|
| /linux-4.4.14/drivers/video/fbdev/core/ |
| D | fb_defio.c | 40 static int fb_deferred_io_fault(struct vm_area_struct *vma, in fb_deferred_io_fault() argument 45 struct fb_info *info = vma->vm_private_data; in fb_deferred_io_fault() 57 if (vma->vm_file) in fb_deferred_io_fault() 58 page->mapping = vma->vm_file->f_mapping; in fb_deferred_io_fault() 94 static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, in fb_deferred_io_mkwrite() argument 98 struct fb_info *info = vma->vm_private_data; in fb_deferred_io_mkwrite() 108 file_update_time(vma->vm_file); in fb_deferred_io_mkwrite() 167 static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) in fb_deferred_io_mmap() argument 169 vma->vm_ops = &fb_deferred_io_vm_ops; in fb_deferred_io_mmap() 170 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in fb_deferred_io_mmap() [all …]
|
| /linux-4.4.14/arch/arm64/include/asm/ |
| D | tlbflush.h | 91 static inline void flush_tlb_page(struct vm_area_struct *vma, in flush_tlb_page() argument 94 unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); in flush_tlb_page() 107 static inline void __flush_tlb_range(struct vm_area_struct *vma, in __flush_tlb_range() argument 111 unsigned long asid = ASID(vma->vm_mm) << 48; in __flush_tlb_range() 115 flush_tlb_mm(vma->vm_mm); in __flush_tlb_range() 132 static inline void flush_tlb_range(struct vm_area_struct *vma, in flush_tlb_range() argument 135 __flush_tlb_range(vma, start, end, false); in flush_tlb_range()
|
| /linux-4.4.14/drivers/uio/ |
| D | uio.c | 580 static int uio_find_mem_index(struct vm_area_struct *vma) in uio_find_mem_index() argument 582 struct uio_device *idev = vma->vm_private_data; in uio_find_mem_index() 584 if (vma->vm_pgoff < MAX_UIO_MAPS) { in uio_find_mem_index() 585 if (idev->info->mem[vma->vm_pgoff].size == 0) in uio_find_mem_index() 587 return (int)vma->vm_pgoff; in uio_find_mem_index() 592 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in uio_vma_fault() argument 594 struct uio_device *idev = vma->vm_private_data; in uio_vma_fault() 599 int mi = uio_find_mem_index(vma); in uio_vma_fault() 623 static int uio_mmap_logical(struct vm_area_struct *vma) in uio_mmap_logical() argument 625 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in uio_mmap_logical() [all …]
|
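uio_find_mem_index() above shows how a driver can export several distinct regions through one device node: user space encodes the region index in the mmap offset, and the driver validates vm_pgoff before remapping. A hedged sketch of that scheme; the region table and demo_* names are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

#define DEMO_MAX_REGIONS 4

struct demo_region {
	unsigned long phys;	/* page-aligned physical base */
	unsigned long size;
};

static struct demo_region demo_regions[DEMO_MAX_REGIONS];

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long idx = vma->vm_pgoff;	/* region index, not an offset */
	unsigned long size = vma->vm_end - vma->vm_start;
	struct demo_region *r;

	if (idx >= DEMO_MAX_REGIONS || demo_regions[idx].size == 0)
		return -EINVAL;
	r = &demo_regions[idx];
	if (size > r->size)
		return -EINVAL;

	/* vm_pgoff was consumed as an index, so remap from the region base */
	return remap_pfn_range(vma, vma->vm_start, r->phys >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}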
| /linux-4.4.14/drivers/gpu/drm/exynos/ |
| D | exynos_drm_gem.h | 121 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 124 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 126 static inline int vma_is_io(struct vm_area_struct *vma) in vma_is_io() argument 128 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP)); in vma_is_io() 132 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma); 135 void exynos_gem_put_vma(struct vm_area_struct *vma); 141 struct vm_area_struct *vma); 146 struct vm_area_struct *vma);
|
| D | exynos_drm_gem.c | 322 struct vm_area_struct *vma) in exynos_drm_gem_mmap_buffer() argument 328 vma->vm_flags &= ~VM_PFNMAP; in exynos_drm_gem_mmap_buffer() 329 vma->vm_pgoff = 0; in exynos_drm_gem_mmap_buffer() 331 vm_size = vma->vm_end - vma->vm_start; in exynos_drm_gem_mmap_buffer() 337 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, in exynos_drm_gem_mmap_buffer() 475 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in exynos_drm_gem_fault() argument 477 struct drm_gem_object *obj = vma->vm_private_data; in exynos_drm_gem_fault() 484 vma->vm_start) >> PAGE_SHIFT; in exynos_drm_gem_fault() 493 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); in exynos_drm_gem_fault() 508 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) in exynos_drm_gem_mmap() argument [all …]
|
| /linux-4.4.14/arch/microblaze/include/asm/ |
| D | cacheflush.h | 64 #define flush_icache_user_range(vma, pg, adr, len) flush_icache(); argument 65 #define flush_icache_page(vma, pg) do { } while (0) argument 92 #define flush_cache_page(vma, vmaddr, pfn) \ argument 97 #define flush_cache_range(vma, start, len) { \ 103 #define flush_cache_range(vma, start, len) do { } while (0) argument 105 static inline void copy_to_user_page(struct vm_area_struct *vma, in copy_to_user_page() argument 111 if (vma->vm_flags & VM_EXEC) { in copy_to_user_page() 117 static inline void copy_from_user_page(struct vm_area_struct *vma, in copy_from_user_page() argument
|
| /linux-4.4.14/arch/microblaze/mm/ |
| D | fault.c | 89 struct vm_area_struct *vma; in do_page_fault() local 149 vma = find_vma(mm, address); in do_page_fault() 150 if (unlikely(!vma)) in do_page_fault() 153 if (vma->vm_start <= address) in do_page_fault() 156 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) in do_page_fault() 171 if (unlikely(address + 0x100000 < vma->vm_end)) { in do_page_fault() 194 if (expand_stack(vma, address)) in do_page_fault() 202 if (unlikely(!(vma->vm_flags & VM_WRITE))) in do_page_fault() 210 if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC)))) in do_page_fault() 219 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
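The microblaze handler above, and the openrisc, alpha, avr32, mips, score, cris, xtensa and mn10300 handlers later in this listing, all follow the same skeleton: find the vma, grow the stack if permitted, check the access against vm_flags, then call handle_mm_fault(). A condensed sketch of that common flow, using the 4.4-era handle_mm_fault() signature; retry and OOM handling are omitted for brevity.

    #include <linux/mm.h>
    #include <linux/types.h>

    static int demo_handle_user_fault(struct mm_struct *mm,
                                      unsigned long address, bool is_write)
    {
        struct vm_area_struct *vma;
        unsigned int flags = FAULT_FLAG_KILLABLE;
        int fault;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
            goto bad_area;              /* nothing mapped at or above address */
        if (vma->vm_start > address) {
            if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;          /* a hole, and not a stack vma */
            if (expand_stack(vma, address))
                goto bad_area;          /* stack could not be grown */
        }

        /* The access type must be allowed by the vma's protection bits. */
        if (is_write) {
            if (!(vma->vm_flags & VM_WRITE))
                goto bad_area;
            flags |= FAULT_FLAG_WRITE;
        } else if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto bad_area;
        }

        fault = handle_mm_fault(mm, vma, address, flags);
        up_read(&mm->mmap_sem);
        return fault;

    bad_area:
        up_read(&mm->mmap_sem);
        return VM_FAULT_SIGSEGV;
    }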
| /linux-4.4.14/drivers/xen/xenbus/ |
| D | xenbus_dev_backend.c | 93 static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) in xenbus_backend_mmap() argument 95 size_t size = vma->vm_end - vma->vm_start; in xenbus_backend_mmap() 100 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0)) in xenbus_backend_mmap() 103 if (remap_pfn_range(vma, vma->vm_start, in xenbus_backend_mmap() 105 size, vma->vm_page_prot)) in xenbus_backend_mmap()
|
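xenbus_dev_backend.c maps a single pre-existing kernel page into user space with remap_pfn_range(). A hedged sketch of that single-page pattern; shared_page is a hypothetical page-aligned, direct-mapped kernel buffer (e.g. from get_zeroed_page()).

    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/io.h>
    #include <linux/mm.h>

    static void *shared_page;    /* hypothetical: one page, direct-mapped */

    static int demo_mmap_one_page(struct file *file, struct vm_area_struct *vma)
    {
        size_t size = vma->vm_end - vma->vm_start;

        if (size > PAGE_SIZE || vma->vm_pgoff != 0)
            return -EINVAL;

        if (remap_pfn_range(vma, vma->vm_start,
                            virt_to_phys(shared_page) >> PAGE_SHIFT,
                            size, vma->vm_page_prot))
            return -EAGAIN;

        return 0;
    }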
| /linux-4.4.14/fs/kernfs/ |
| D | file.c | 328 static void kernfs_vma_open(struct vm_area_struct *vma) in kernfs_vma_open() argument 330 struct file *file = vma->vm_file; in kernfs_vma_open() 340 of->vm_ops->open(vma); in kernfs_vma_open() 345 static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in kernfs_vma_fault() argument 347 struct file *file = vma->vm_file; in kernfs_vma_fault() 359 ret = of->vm_ops->fault(vma, vmf); in kernfs_vma_fault() 365 static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma, in kernfs_vma_page_mkwrite() argument 368 struct file *file = vma->vm_file; in kernfs_vma_page_mkwrite() 380 ret = of->vm_ops->page_mkwrite(vma, vmf); in kernfs_vma_page_mkwrite() 388 static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, in kernfs_vma_access() argument [all …]
|
| /linux-4.4.14/arch/openrisc/mm/ |
| D | fault.c | 54 struct vm_area_struct *vma; in do_page_fault() local 112 vma = find_vma(mm, address); in do_page_fault() 114 if (!vma) in do_page_fault() 117 if (vma->vm_start <= address) in do_page_fault() 120 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 133 if (expand_stack(vma, address)) in do_page_fault() 147 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 152 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault() 157 if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC)) in do_page_fault() 166 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/alpha/mm/ |
| D | fault.c | 86 struct vm_area_struct * vma; in do_page_fault() local 120 vma = find_vma(mm, address); in do_page_fault() 121 if (!vma) in do_page_fault() 123 if (vma->vm_start <= address) in do_page_fault() 125 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 127 if (expand_stack(vma, address)) in do_page_fault() 135 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 139 if (!(vma->vm_flags & (VM_READ | VM_WRITE))) in do_page_fault() 142 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 150 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/avr32/mm/ |
| D | fault.c | 60 struct vm_area_struct *vma; in do_page_fault() local 94 vma = find_vma(mm, address); in do_page_fault() 95 if (!vma) in do_page_fault() 97 if (vma->vm_start <= address) in do_page_fault() 99 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 101 if (expand_stack(vma, address)) in do_page_fault() 114 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 119 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in do_page_fault() 124 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 137 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/um/drivers/ |
| D | mmapper_kern.c | 48 static int mmapper_mmap(struct file *file, struct vm_area_struct *vma) in mmapper_mmap() argument 53 if (vma->vm_pgoff != 0) in mmapper_mmap() 56 size = vma->vm_end - vma->vm_start; in mmapper_mmap() 64 if (remap_pfn_range(vma, vma->vm_start, p_buf >> PAGE_SHIFT, size, in mmapper_mmap() 65 vma->vm_page_prot)) in mmapper_mmap()
|
| /linux-4.4.14/arch/metag/kernel/ |
| D | dma.c | 335 static int dma_mmap(struct device *dev, struct vm_area_struct *vma, in dma_mmap() argument 343 user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in dma_mmap() 350 unsigned long off = vma->vm_pgoff; in dma_mmap() 356 ret = remap_pfn_range(vma, vma->vm_start, in dma_mmap() 359 vma->vm_page_prot); in dma_mmap() 367 int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, in dma_mmap_coherent() argument 370 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in dma_mmap_coherent() 371 return dma_mmap(dev, vma, cpu_addr, dma_addr, size); in dma_mmap_coherent() 375 int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, in dma_mmap_writecombine() argument 378 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in dma_mmap_writecombine() [all …]
|
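metag's dma_mmap_coherent()/dma_mmap_writecombine() adjust vm_page_prot and then remap the coherent buffer. Seen from the driver side, the usual idiom is the mirror image: allocate with dma_alloc_coherent() and let dma_mmap_coherent() do the pfn arithmetic in the mmap handler. A hedged sketch; demo_dev, demo_buf, demo_handle and demo_size are hypothetical driver state set up at probe time.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct device *demo_dev;    /* hypothetical, set at probe */
    static void *demo_buf;             /* returned by dma_alloc_coherent() */
    static dma_addr_t demo_handle;
    static size_t demo_size;

    static int demo_probe_alloc(void)
    {
        demo_buf = dma_alloc_coherent(demo_dev, demo_size, &demo_handle,
                                      GFP_KERNEL);
        return demo_buf ? 0 : -ENOMEM;
    }

    static int demo_dma_mmap(struct file *file, struct vm_area_struct *vma)
    {
        /* Expected to apply non-cached protection and to reject ranges
         * that fall outside the allocation. */
        return dma_mmap_coherent(demo_dev, vma, demo_buf, demo_handle,
                                 demo_size);
    }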
| /linux-4.4.14/arch/powerpc/kernel/ |
| D | proc_powerpc.c | 44 static int page_map_mmap( struct file *file, struct vm_area_struct *vma ) in page_map_mmap() argument 46 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) in page_map_mmap() 49 remap_pfn_range(vma, vma->vm_start, in page_map_mmap() 51 PAGE_SIZE, vma->vm_page_prot); in page_map_mmap()
|
| /linux-4.4.14/arch/mips/mm/ |
| D | tlb-r3k.c | 81 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, in local_flush_tlb_range() argument 84 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range() 160 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) in local_flush_tlb_page() argument 164 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page() 169 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page() 171 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; in local_flush_tlb_page() 191 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) in __update_tlb() argument 199 if (current->active_mm != vma->vm_mm) in __update_tlb() 205 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb() 207 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
|
| D | c-octeon.c | 65 static void octeon_flush_icache_all_cores(struct vm_area_struct *vma) in octeon_flush_icache_all_cores() argument 83 if (vma) in octeon_flush_icache_all_cores() 84 mask = *mm_cpumask(vma->vm_mm); in octeon_flush_icache_all_cores() 138 struct vm_area_struct *vma; in octeon_flush_cache_sigtramp() local 141 vma = find_vma(current->mm, addr); in octeon_flush_cache_sigtramp() 142 octeon_flush_icache_all_cores(vma); in octeon_flush_cache_sigtramp() 154 static void octeon_flush_cache_range(struct vm_area_struct *vma, in octeon_flush_cache_range() argument 157 if (vma->vm_flags & VM_EXEC) in octeon_flush_cache_range() 158 octeon_flush_icache_all_cores(vma); in octeon_flush_cache_range() 169 static void octeon_flush_cache_page(struct vm_area_struct *vma, in octeon_flush_cache_page() argument [all …]
|
| D | fault.c | 42 struct vm_area_struct * vma = NULL; in __do_page_fault() local 102 vma = find_vma(mm, address); in __do_page_fault() 103 if (!vma) in __do_page_fault() 105 if (vma->vm_start <= address) in __do_page_fault() 107 if (!(vma->vm_flags & VM_GROWSDOWN)) in __do_page_fault() 109 if (expand_stack(vma, address)) in __do_page_fault() 119 if (!(vma->vm_flags & VM_WRITE)) in __do_page_fault() 124 if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) { in __do_page_fault() 134 if (!(vma->vm_flags & VM_READ) && in __do_page_fault() 146 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in __do_page_fault() [all …]
|
| /linux-4.4.14/arch/score/mm/ |
| D | fault.c | 47 struct vm_area_struct *vma = NULL; in do_page_fault() local 84 vma = find_vma(mm, address); in do_page_fault() 85 if (!vma) in do_page_fault() 87 if (vma->vm_start <= address) in do_page_fault() 89 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 91 if (expand_stack(vma, address)) in do_page_fault() 101 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 105 if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))) in do_page_fault() 114 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/unicore32/include/asm/ |
| D | cacheflush.h | 120 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument 140 extern void flush_cache_range(struct vm_area_struct *vma, 142 extern void flush_cache_page(struct vm_area_struct *vma, 152 #define flush_cache_user_range(vma, start, end) \ argument 187 #define flush_icache_user_range(vma, page, addr, len) \ argument 194 #define flush_icache_page(vma, page) do { } while (0) argument
|
| D | tlbflush.h | 88 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) in local_flush_tlb_page() argument 90 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { in local_flush_tlb_page() 170 #define local_flush_tlb_range(vma, start, end) \ argument 171 __cpu_flush_user_tlb_range(start, end, vma) 187 extern void update_mmu_cache(struct vm_area_struct *vma,
|
| /linux-4.4.14/arch/cris/mm/ |
| D | fault.c | 58 struct vm_area_struct * vma; in do_page_fault() local 123 vma = find_vma(mm, address); in do_page_fault() 124 if (!vma) in do_page_fault() 126 if (vma->vm_start <= address) in do_page_fault() 128 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 140 if (expand_stack(vma, address)) in do_page_fault() 154 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 157 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 161 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault() 171 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/drivers/misc/mic/host/ |
| D | mic_fops.c | 192 mic_mmap(struct file *f, struct vm_area_struct *vma) in mic_mmap() argument 195 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; in mic_mmap() 196 unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; in mic_mmap() 203 if (vma->vm_flags & VM_WRITE) in mic_mmap() 210 err = remap_pfn_range(vma, vma->vm_start + offset, in mic_mmap() 211 pa >> PAGE_SHIFT, size, vma->vm_page_prot); in mic_mmap() 217 pa, vma->vm_start + offset); in mic_mmap()
|
| /linux-4.4.14/drivers/vfio/platform/ |
| D | vfio_platform_common.c | 478 struct vm_area_struct *vma) in vfio_platform_mmap_mmio() argument 482 req_len = vma->vm_end - vma->vm_start; in vfio_platform_mmap_mmio() 483 pgoff = vma->vm_pgoff & in vfio_platform_mmap_mmio() 490 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); in vfio_platform_mmap_mmio() 491 vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff; in vfio_platform_mmap_mmio() 493 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in vfio_platform_mmap_mmio() 494 req_len, vma->vm_page_prot); in vfio_platform_mmap_mmio() 497 static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma) in vfio_platform_mmap() argument 502 index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT); in vfio_platform_mmap() 504 if (vma->vm_end < vma->vm_start) in vfio_platform_mmap() [all …]
|
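vfio_platform packs a region index into the high bits of the mmap offset and the offset-within-region into the low bits. A hedged sketch of that decode; the 40-bit split (DEMO_OFFSET_SHIFT) is an assumption carried over from the VFIO PCI layout, not read out of this file.

    #include <linux/mm.h>
    #include <linux/types.h>

    #define DEMO_OFFSET_SHIFT   40
    #define DEMO_OFFSET_MASK    (((u64)1 << DEMO_OFFSET_SHIFT) - 1)

    static void demo_decode_mmap_offset(struct vm_area_struct *vma,
                                        unsigned int *index, u64 *offset)
    {
        u64 off = (u64)vma->vm_pgoff << PAGE_SHIFT;

        *index  = off >> DEMO_OFFSET_SHIFT;     /* which region */
        *offset = off & DEMO_OFFSET_MASK;       /* offset inside it */
    }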
| /linux-4.4.14/arch/x86/mm/ |
| D | pgtable.c | 409 int ptep_set_access_flags(struct vm_area_struct *vma, in ptep_set_access_flags() argument 417 pte_update_defer(vma->vm_mm, address, ptep); in ptep_set_access_flags() 424 int pmdp_set_access_flags(struct vm_area_struct *vma, in pmdp_set_access_flags() argument 434 pmd_update_defer(vma->vm_mm, address, pmdp); in pmdp_set_access_flags() 447 int ptep_test_and_clear_young(struct vm_area_struct *vma, in ptep_test_and_clear_young() argument 457 pte_update(vma->vm_mm, addr, ptep); in ptep_test_and_clear_young() 463 int pmdp_test_and_clear_young(struct vm_area_struct *vma, in pmdp_test_and_clear_young() argument 473 pmd_update(vma->vm_mm, addr, pmdp); in pmdp_test_and_clear_young() 479 int ptep_clear_flush_young(struct vm_area_struct *vma, in ptep_clear_flush_young() argument 495 return ptep_test_and_clear_young(vma, address, ptep); in ptep_clear_flush_young() [all …]
|
| D | hugetlbpage.c | 27 struct vm_area_struct *vma; 29 vma = find_vma(mm, addr); 30 if (!vma || !is_vm_hugetlb_page(vma)) 130 struct vm_area_struct *vma; in hugetlb_get_unmapped_area() local 145 vma = find_vma(mm, addr); in hugetlb_get_unmapped_area() 147 (!vma || addr + len <= vma->vm_start)) in hugetlb_get_unmapped_area()
|
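hugetlb_get_unmapped_area() validates a caller-supplied address by checking that no vma overlaps the requested range. That test reduces to the small helper sketched below (the helper name is made up).

    #include <linux/mm.h>
    #include <linux/types.h>

    /* True when [addr, addr + len) does not collide with any existing vma. */
    static bool demo_hole_fits(struct mm_struct *mm, unsigned long addr,
                               unsigned long len)
    {
        struct vm_area_struct *vma = find_vma(mm, addr);

        return !vma || addr + len <= vma->vm_start;
    }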
| /linux-4.4.14/arch/xtensa/mm/ |
| D | cache.c | 108 unsigned long vaddr, struct vm_area_struct *vma) in copy_user_highpage() argument 185 void local_flush_cache_range(struct vm_area_struct *vma, in local_flush_cache_range() argument 199 void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, in local_flush_cache_page() argument 214 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) in update_mmu_cache() argument 226 flush_tlb_page(vma, addr); in update_mmu_cache() 244 && (vma->vm_flags & VM_EXEC) != 0) { in update_mmu_cache() 261 void copy_to_user_page(struct vm_area_struct *vma, struct page *page, in copy_to_user_page() argument 288 if ((vma->vm_flags & VM_EXEC) != 0) in copy_to_user_page() 291 } else if ((vma->vm_flags & VM_EXEC) != 0) { in copy_to_user_page() 297 extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page, in copy_from_user_page() argument
|
| D | fault.c | 40 struct vm_area_struct * vma; in do_page_fault() local 80 vma = find_vma(mm, address); in do_page_fault() 82 if (!vma) in do_page_fault() 84 if (vma->vm_start <= address) in do_page_fault() 86 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 88 if (expand_stack(vma, address)) in do_page_fault() 99 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 103 if (!(vma->vm_flags & VM_EXEC)) in do_page_fault() 106 if (!(vma->vm_flags & (VM_READ | VM_WRITE))) in do_page_fault() 113 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/mn10300/mm/ |
| D | fault.c | 121 struct vm_area_struct *vma; in do_page_fault() local 179 vma = find_vma(mm, address); in do_page_fault() 180 if (!vma) in do_page_fault() 182 if (vma->vm_start <= address) in do_page_fault() 184 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_page_fault() 197 vma->vm_start, vma->vm_end); in do_page_fault() 218 if (expand_stack(vma, address)) in do_page_fault() 236 if (!(vma->vm_flags & VM_WRITE)) in do_page_fault() 247 if (!(vma->vm_flags & (VM_READ | VM_EXEC))) in do_page_fault() 257 fault = handle_mm_fault(mm, vma, address, flags); in do_page_fault()
|
| /linux-4.4.14/arch/c6x/include/asm/ |
| D | cacheflush.h | 29 #define flush_cache_page(vma, vmaddr, pfn) do {} while (0) argument 46 #define flush_icache_page(vma, page) \ argument 48 if ((vma)->vm_flags & PROT_EXEC) \ 56 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ argument 62 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ argument
|
| /linux-4.4.14/arch/powerpc/platforms/powernv/ |
| D | opal-prd.c | 112 static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) in opal_prd_mmap() argument 119 vma->vm_start, vma->vm_end, vma->vm_pgoff, in opal_prd_mmap() 120 vma->vm_flags); in opal_prd_mmap() 122 addr = vma->vm_pgoff << PAGE_SHIFT; in opal_prd_mmap() 123 size = vma->vm_end - vma->vm_start; in opal_prd_mmap() 129 page_prot = phys_mem_access_prot(file, vma->vm_pgoff, in opal_prd_mmap() 130 size, vma->vm_page_prot); in opal_prd_mmap() 132 rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, in opal_prd_mmap()
|
| /linux-4.4.14/arch/x86/entry/vdso/ |
| D | vma.c | 95 struct vm_area_struct *vma; in map_vdso() local 126 vma = _install_special_mapping(mm, in map_vdso() 133 if (IS_ERR(vma)) { in map_vdso() 134 ret = PTR_ERR(vma); in map_vdso() 138 vma = _install_special_mapping(mm, in map_vdso() 144 if (IS_ERR(vma)) { in map_vdso() 145 ret = PTR_ERR(vma); in map_vdso() 150 ret = remap_pfn_range(vma, in map_vdso() 161 ret = io_remap_pfn_range(vma, in map_vdso()
|
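The vdso code installs its pages with _install_special_mapping() so the vma appears under a stable name in /proc/<pid>/maps. A hedged sketch of a single-page special mapping using the 4.4 vm_special_mapping layout; demo_pages and the chosen name and flags are illustrative.

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/mm_types.h>

    static struct page *demo_pages[2];    /* [0] filled at boot; NULL-terminated */

    static struct vm_special_mapping demo_mapping = {
        .name  = "[demo_vvar]",
        .pages = demo_pages,
    };

    static int demo_map_special(struct mm_struct *mm, unsigned long addr)
    {
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_MAYREAD,
                                       &demo_mapping);
        up_write(&mm->mmap_sem);

        return IS_ERR(vma) ? PTR_ERR(vma) : 0;
    }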