Lines Matching refs:vma

247 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
248 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
252 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
255 int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
260 int (*access)(struct vm_area_struct *vma, unsigned long addr,
266 const char *(*name)(struct vm_area_struct *vma);
276 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
288 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
296 struct page *(*find_special_page)(struct vm_area_struct *vma,
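The callbacks above are the vm_operations_struct methods of this kernel generation, where the vma is still passed explicitly (later kernels fold it into struct vm_fault). A minimal sketch of a driver-style fault handler, assuming a hypothetical my_dev_get_page() that looks up the page backing an offset:

        #include <linux/mm.h>

        /* Hypothetical lookup of the page backing a pgoff; not a real API. */
        extern struct page *my_dev_get_page(pgoff_t pgoff);

        static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct page *page = my_dev_get_page(vmf->pgoff);

                if (!page)
                        return VM_FAULT_SIGBUS;

                get_page(page);         /* the core MM drops this reference */
                vmf->page = page;       /* returning 0 asks it to map this page */
                return 0;
        }

        static const struct vm_operations_struct my_vm_ops = {
                .fault = my_fault,
        };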
610 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite() argument
612 if (likely(vma->vm_flags & VM_WRITE)) in maybe_mkwrite()
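Reassembled from the two fragments above, maybe_mkwrite() in kernels of this vintage is just a guard around pte_mkwrite(): the pte is made writable only when the vma itself permits writes.

        static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
        {
                if (likely(vma->vm_flags & VM_WRITE))
                        pte = pte_mkwrite(pte);
                return pte;
        }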
617 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
1102 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1105 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1107 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1145 struct vm_area_struct *vma; member
1151 int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1155 struct vm_area_struct *vma);
1158 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1160 int follow_phys(struct vm_area_struct *vma, unsigned long address,
1162 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
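generic_access_phys() is meant to be wired straight into a vm_operations_struct as the .access method; this is how MMIO mappings such as /dev/mem stay readable and writable through ptrace():

        #include <linux/mm.h>

        /* Sketch: let ptrace()/gdb peek and poke an MMIO-backed mapping. */
        static const struct vm_operations_struct mmio_vm_ops = {
        #ifdef CONFIG_HAVE_IOREMAP_PROT
                .access = generic_access_phys,
        #endif
        };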
1180 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1186 struct vm_area_struct *vma, unsigned long address, in handle_mm_fault() argument
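handle_mm_fault() is the entry point arch page-fault handlers call once the faulting address has been resolved to a vma (the second match above is the inline stub variant). A condensed sketch of the usual shape in this era, with retry and signal handling elided:

        #include <linux/mm.h>

        static int fault_sketch(struct mm_struct *mm, unsigned long address,
                                bool is_write)
        {
                struct vm_area_struct *vma;
                unsigned int flags = is_write ? FAULT_FLAG_WRITE : 0;
                int fault = VM_FAULT_SIGSEGV;

                /* real handlers also pass FAULT_FLAG_ALLOW_RETRY and re-take
                 * mmap_sem when handle_mm_fault() returns VM_FAULT_RETRY */
                down_read(&mm->mmap_sem);
                vma = find_vma(mm, address);
                if (vma && vma->vm_start <= address)
                        fault = handle_mm_fault(mm, vma, address, flags);
                up_read(&mm->mmap_sem);
                return fault;   /* callers check VM_FAULT_ERROR etc. */
        }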
1251 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) in vma_growsdown() argument
1253 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); in vma_growsdown()
1256 static inline int stack_guard_page_start(struct vm_area_struct *vma, in stack_guard_page_start() argument
1259 return (vma->vm_flags & VM_GROWSDOWN) && in stack_guard_page_start()
1260 (vma->vm_start == addr) && in stack_guard_page_start()
1261 !vma_growsdown(vma->vm_prev, addr); in stack_guard_page_start()
1265 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) in vma_growsup() argument
1267 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); in vma_growsup()
1270 static inline int stack_guard_page_end(struct vm_area_struct *vma, in stack_guard_page_end() argument
1273 return (vma->vm_flags & VM_GROWSUP) && in stack_guard_page_end()
1274 (vma->vm_end == addr) && in stack_guard_page_end()
1275 !vma_growsup(vma->vm_next, addr); in stack_guard_page_end()
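Pieced together, the four helpers above form two symmetric predicates used in this era to keep a guard page at the growing end of a stack: the first page of a VM_GROWSDOWN vma (or the last page of a VM_GROWSUP one) is treated as a guard page unless the neighbouring vma is another stack abutting it exactly.

        static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
        {
                return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
        }

        static inline int stack_guard_page_start(struct vm_area_struct *vma,
                                                 unsigned long addr)
        {
                return (vma->vm_flags & VM_GROWSDOWN) &&
                        (vma->vm_start == addr) &&
                        !vma_growsdown(vma->vm_prev, addr);
        }

        static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
        {
                return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
        }

        static inline int stack_guard_page_end(struct vm_area_struct *vma,
                                               unsigned long addr)
        {
                return (vma->vm_flags & VM_GROWSUP) &&
                        (vma->vm_end == addr) &&
                        !vma_growsup(vma->vm_next, addr);
        }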
1279 struct vm_area_struct *vma, bool in_group);
1281 extern unsigned long move_page_tables(struct vm_area_struct *vma,
1285 extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1288 extern int mprotect_fixup(struct vm_area_struct *vma,
1382 int vma_wants_writenotify(struct vm_area_struct *vma);
1445 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1571 #define pte_alloc_map(mm, vma, pmd, address) \ argument
1572 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
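The match above cuts the macro off at its line continuation; in full it reads roughly as below for kernels of this vintage: allocate the pte page only when the pmd is still empty, then map it (NULL on allocation failure).

        #define pte_alloc_map(mm, vma, pmd, address)                    \
                ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
                                                pmd, address)) ?        \
                 NULL : pte_offset_map(pmd, address))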
1802 #define vma_interval_tree_foreach(vma, root, start, last) \ argument
1803 for (vma = vma_interval_tree_iter_first(root, start, last); \
1804 vma; vma = vma_interval_tree_iter_next(vma, start, last))
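vma_interval_tree_foreach() walks every vma whose file range overlaps [start, last] in an address_space's i_mmap tree; rmap uses it to find all mappings of a file page. A sketch of a walk over a single page offset:

        #include <linux/fs.h>
        #include <linux/mm.h>

        /* Visit every vma that maps page offset 'pgoff' of 'mapping'. */
        static void visit_mappers(struct address_space *mapping, pgoff_t pgoff)
        {
                struct vm_area_struct *vma;

                i_mmap_lock_read(mapping);
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                        unsigned long addr = vma->vm_start +
                                ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                        /* 'addr' is where this vma maps the offset */
                        (void)addr;
                }
                i_mmap_unlock_read(mapping);
        }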
1824 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1939 extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1940 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1969 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1972 extern int expand_downwards(struct vm_area_struct *vma,
1975 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1977 #define expand_upwards(vma, address) (0) argument
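expand_stack() is what turns a fault just beyond a growable vma into stack growth instead of SIGSEGV; note the !CONFIG_STACK_GROWSUP stub for expand_upwards() on the line above. Combined with the lookup sketched after handle_mm_fault(), the usual shape is:

        #include <linux/mm.h>

        /* Resolve 'address' to a vma, growing a VM_GROWSDOWN stack when the
         * address falls just below one (caller holds mmap_sem for read). */
        static struct vm_area_struct *vma_for_fault(struct mm_struct *mm,
                                                    unsigned long address)
        {
                struct vm_area_struct *vma = find_vma(mm, address);

                if (!vma)
                        return NULL;
                if (vma->vm_start <= address)
                        return vma;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        return NULL;
                return expand_stack(vma, address) ? NULL : vma;
        }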
1989 struct vm_area_struct * vma = find_vma(mm,start_addr); in find_vma_intersection() local
1991 if (vma && end_addr <= vma->vm_start) in find_vma_intersection()
1992 vma = NULL; in find_vma_intersection()
1993 return vma; in find_vma_intersection()
1996 static inline unsigned long vma_pages(struct vm_area_struct *vma) in vma_pages() argument
1998 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in vma_pages()
2005 struct vm_area_struct *vma = find_vma(mm, vm_start); in find_exact_vma() local
2007 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) in find_exact_vma()
2008 vma = NULL; in find_exact_vma()
2010 return vma; in find_exact_vma()
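find_vma_intersection() answers "does any mapping overlap [start_addr, end_addr)?", and vma_pages() converts a vma's byte span to pages; the two compose naturally:

        #include <linux/mm.h>

        /* Size, in pages, of the first vma overlapping [start, end);
         * 0 if the range is unmapped. Caller holds mmap_sem. */
        static unsigned long first_overlap_pages(struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
        {
                struct vm_area_struct *vma = find_vma_intersection(mm, start, end);

                return vma ? vma_pages(vma) : 0;
        }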
2015 void vma_set_page_prot(struct vm_area_struct *vma);
2021 static inline void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
2023 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in vma_set_page_prot()
2028 unsigned long change_prot_numa(struct vm_area_struct *vma,
2036 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2038 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2040 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2043 struct page *follow_page_mask(struct vm_area_struct *vma,
2047 static inline struct page *follow_page(struct vm_area_struct *vma, in follow_page() argument
2051 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); in follow_page()
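follow_page() resolves a user address within a vma to the struct page currently backing it, without faulting anything in. A sketch that pins the result with FOLL_GET:

        #include <linux/err.h>
        #include <linux/mm.h>

        /* Look up and pin the page mapped at 'addr' in 'vma'; NULL if none.
         * Caller holds mmap_sem and must put_page() the result when done. */
        static struct page *peek_user_page(struct vm_area_struct *vma,
                                           unsigned long addr)
        {
                struct page *page = follow_page(vma, addr, FOLL_GET);

                return IS_ERR_OR_NULL(page) ? NULL : page;
        }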
2142 const char * arch_vma_name(struct vm_area_struct *vma);
2189 unsigned long addr, struct vm_area_struct *vma,