Lines matching refs: vma

256 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
259 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
263 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
266 int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
271 int (*access)(struct vm_area_struct *vma, unsigned long addr,
277 const char *(*name)(struct vm_area_struct *vma);
287 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
299 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
307 struct page *(*find_special_page)(struct vm_area_struct *vma,
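
The entries above (256-307) are callbacks of struct vm_operations_struct. As a minimal sketch of how a driver might wire up the (vma, vmf) ->fault signature shown here, assuming a hypothetical page array; my_dev_fault, my_pages and MY_DEV_NPAGES are made-up names, not part of the header:

#define MY_DEV_NPAGES   16                              /* hypothetical */
static struct page *my_pages[MY_DEV_NPAGES];            /* hypothetical backing store */

static int my_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page;

        if (vmf->pgoff >= MY_DEV_NPAGES)                /* out-of-range offset */
                return VM_FAULT_SIGBUS;

        page = my_pages[vmf->pgoff];
        get_page(page);                                 /* fault path expects a reference */
        vmf->page = page;                               /* handed back to the fault path */
        return 0;
}

static const struct vm_operations_struct my_dev_vm_ops = {
        .fault = my_dev_fault,
};

The driver's ->mmap() would then just set vma->vm_ops = &my_dev_vm_ops before returning.
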
572 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite() argument
574 if (likely(vma->vm_flags & VM_WRITE)) in maybe_mkwrite()
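
Only the matching lines of this helper appear above; for context, the body at 572-577 is essentially just a conditional pte_mkwrite() (a sketch from this era's header, not a verbatim quote):

static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pte = pte_mkwrite(pte);
        return pte;
}
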
579 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
1085 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1087 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1090 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1092 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1130 struct vm_area_struct *vma; member
1136 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1140 struct vm_area_struct *vma);
1143 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1145 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1147 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
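
The 1085-1147 block covers page-table inspection helpers and the mm_walk machinery (the 1130 "vma" member belongs to struct mm_walk). A sketch of driving walk_page_vma() with a pte_entry callback, assuming mm->mmap_sem is already held for read; count_pte() and count_present_ptes() are made-up names:

static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
                     struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;                       /* non-zero would abort the walk */
}

static unsigned long count_present_ptes(struct vm_area_struct *vma)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte,
                .mm = vma->vm_mm,
                .private = &count,
        };

        /* caller must hold vma->vm_mm->mmap_sem for read */
        walk_page_vma(vma, &walk);
        return count;
}
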
1165 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1171 struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
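
This is still the older handle_mm_fault() form that takes the mm_struct explicitly. Roughly, the call pattern in an architecture page-fault handler looks like the fragment below; the labels and the "write" flag are placeholders from the surrounding handler:

        fault = handle_mm_fault(mm, vma, address,
                                write ? FAULT_FLAG_WRITE : 0);
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;                     /* placeholder label */
        if (fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                goto bad_area;                          /* placeholder label */
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
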
1282 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) in vma_growsdown() argument
1284 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); in vma_growsdown()
1287 static inline bool vma_is_anonymous(struct vm_area_struct *vma) in vma_is_anonymous() argument
1289 return !vma->vm_ops; in vma_is_anonymous()
1292 static inline int stack_guard_page_start(struct vm_area_struct *vma, in stack_guard_page_start() argument
1295 return (vma->vm_flags & VM_GROWSDOWN) && in stack_guard_page_start()
1296 (vma->vm_start == addr) && in stack_guard_page_start()
1297 !vma_growsdown(vma->vm_prev, addr); in stack_guard_page_start()
1301 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) in vma_growsup() argument
1303 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); in vma_growsup()
1306 static inline int stack_guard_page_end(struct vm_area_struct *vma, in stack_guard_page_end() argument
1309 return (vma->vm_flags & VM_GROWSUP) && in stack_guard_page_end()
1310 (vma->vm_end == addr) && in stack_guard_page_end()
1311 !vma_growsup(vma->vm_next, addr); in stack_guard_page_end()
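
vma_is_anonymous() distinguishes anonymous mappings (no vm_ops) from file- or driver-backed ones, and the stack_guard_page_* helpers flag the implicit guard page at the growing edge of a stack VMA. A small sketch in the spirit of this era's get_user_pages() checks; page_is_touchable() is a made-up name:

static bool page_is_touchable(struct vm_area_struct *vma, unsigned long addr)
{
        /* never hand out the guard page of a growable stack mapping */
        if (stack_guard_page_start(vma, addr) ||
            stack_guard_page_end(vma, addr + PAGE_SIZE))
                return false;
        return true;
}
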
1315 struct vm_area_struct *vma, bool in_group);
1317 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1321 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1324 extern int mprotect_fixup(struct vm_area_struct *vma,
1418 int vma_wants_writenotify(struct vm_area_struct *vma);
1481 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1608 #define pte_alloc_map(mm, vma, pmd, address) \ argument
1609 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
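
pte_alloc_map() (built on __pte_alloc()) populates the PTE page for a pmd if it is still empty and returns the mapped PTE for the address, or NULL if the allocation failed. A hedged fragment of the usual call-site shape:

        pte_t *pte;

        pte = pte_alloc_map(mm, vma, pmd, address);
        if (!pte)
                return VM_FAULT_OOM;    /* __pte_alloc() could not get a page */
        /* examine or install *pte here */
        pte_unmap(pte);
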
1843 #define vma_interval_tree_foreach(vma, root, start, last) \ argument
1844 for (vma = vma_interval_tree_iter_first(root, start, last); \
1845 vma; vma = vma_interval_tree_iter_next(vma, start, last))
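
vma_interval_tree_foreach() walks every VMA whose file range overlaps [start, last], which is how the rmap code finds all mappers of a page. A sketch over a file's i_mmap tree; dump_mappers() is a made-up name and the caller is assumed to pass page offsets:

static void dump_mappers(struct address_space *mapping,
                         pgoff_t pgstart, pgoff_t pgend)
{
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgstart, pgend) {
                /* each vma here maps at least one page in [pgstart, pgend] */
                pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
        }
        i_mmap_unlock_read(mapping);
}
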
1865 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1988 extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1989 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2016 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2019 extern int expand_downwards(struct vm_area_struct *vma,
2022 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2024 #define expand_upwards(vma, address) (0) argument
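
expand_stack()/expand_downwards()/expand_upwards() grow a VM_GROWSDOWN or VM_GROWSUP mapping on demand; the 2024 stub is the !CONFIG_STACK_GROWSUP fallback. The classic caller is an architecture page-fault handler, roughly (labels are placeholders):

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;                  /* nothing mapped at or above address */
        if (vma->vm_start <= address)
                goto good_area;                 /* address already inside this vma */
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;                  /* could not grow the stack */
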
2036 struct vm_area_struct * vma = find_vma(mm,start_addr); in find_vma_intersection() local
2038 if (vma && end_addr <= vma->vm_start) in find_vma_intersection()
2039 vma = NULL; in find_vma_intersection()
2040 return vma; in find_vma_intersection()
2043 static inline unsigned long vma_pages(struct vm_area_struct *vma) in vma_pages() argument
2045 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in vma_pages()
2052 struct vm_area_struct *vma = find_vma(mm, vm_start); in find_exact_vma() local
2054 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) in find_exact_vma()
2055 vma = NULL; in find_exact_vma()
2057 return vma; in find_exact_vma()
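
A sketch of the overlap test find_vma_intersection() exists for; range_is_free() is a made-up helper that reports whether anything is mapped in [start, end):

static int range_is_free(struct mm_struct *mm,
                         unsigned long start, unsigned long end)
{
        int free;

        down_read(&mm->mmap_sem);               /* protects the VMA lookup */
        free = (find_vma_intersection(mm, start, end) == NULL);
        up_read(&mm->mmap_sem);
        return free;
}
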
2062 void vma_set_page_prot(struct vm_area_struct *vma);
2068 static inline void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
2070 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in vma_set_page_prot()
2075 unsigned long change_prot_numa(struct vm_area_struct *vma,
2083 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2085 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2087 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2090 struct page *follow_page_mask(struct vm_area_struct *vma,
2094 static inline struct page *follow_page(struct vm_area_struct *vma, in follow_page() argument
2098 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); in follow_page()
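
vm_insert_pfn()/vm_insert_mixed()/vm_iomap_memory() are what a driver's ->mmap() typically uses to expose device memory to userspace. A minimal sketch with vm_iomap_memory(); struct my_dev and its fields are hypothetical:

struct my_dev {
        phys_addr_t     phys_base;              /* hypothetical device window */
        unsigned long   phys_len;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *dev = file->private_data;        /* hypothetical */

        /* maps the device window into the vma, with bounds checking */
        return vm_iomap_memory(vma, dev->phys_base, dev->phys_len);
}
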
2190 const char * arch_vma_name(struct vm_area_struct *vma);
2273 unsigned long addr, struct vm_area_struct *vma,