Searched refs: anon_vma (Results 1 - 29 of 29) sorted by relevance

/linux-4.1.27/include/linux/
rmap.h
14 * The anon_vma heads a list of private "related" vmas, to scan if
15 * an anonymous page pointing to this anon_vma needs to be unmapped:
20 * directly to a vma: instead it points to an anon_vma, on whose list
24 * the anon_vma object itself: we're guaranteed no page can be
25 * pointing to this anon_vma once its vma list is empty.
27 struct anon_vma { struct
28 struct anon_vma *root; /* Root of this anon_vma tree */
31 * The refcount is taken on an anon_vma when there is no
35 * anon_vma if they are the last user on release
40 * Count of child anon_vmas and VMAs which point to this anon_vma.
42 * This counter is used for making decisions about reusing the anon_vma
47 struct anon_vma *parent; /* Parent of this anon_vma */
61 * The copy-on-write semantics of fork mean that an anon_vma
63 * each child process will have its own anon_vma, where new
67 * with a VMA, or the VMAs associated with an anon_vma.
71 * which link all the VMAs associated with this anon_vma.
75 struct anon_vma *anon_vma; member in struct:anon_vma_chain
77 struct rb_node rb; /* locked by anon_vma->rwsem */
95 static inline void get_anon_vma(struct anon_vma *anon_vma) get_anon_vma() argument
97 atomic_inc(&anon_vma->refcount); get_anon_vma()
100 void __put_anon_vma(struct anon_vma *anon_vma);
102 static inline void put_anon_vma(struct anon_vma *anon_vma) put_anon_vma() argument
104 if (atomic_dec_and_test(&anon_vma->refcount)) put_anon_vma()
105 __put_anon_vma(anon_vma); put_anon_vma()
108 static inline void anon_vma_lock_write(struct anon_vma *anon_vma) anon_vma_lock_write() argument
110 down_write(&anon_vma->root->rwsem); anon_vma_lock_write()
113 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma) anon_vma_unlock_write() argument
115 up_write(&anon_vma->root->rwsem); anon_vma_unlock_write()
118 static inline void anon_vma_lock_read(struct anon_vma *anon_vma) anon_vma_lock_read() argument
120 down_read(&anon_vma->root->rwsem); anon_vma_lock_read()
123 static inline void anon_vma_unlock_read(struct anon_vma *anon_vma) anon_vma_unlock_read() argument
125 up_read(&anon_vma->root->rwsem); anon_vma_unlock_read()
130 * anon_vma helper functions.
141 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); anon_vma_merge()
145 struct anon_vma *page_get_anon_vma(struct page *page);
217 struct anon_vma *page_lock_anon_vma_read(struct page *page);
218 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
235 struct anon_vma *(*anon_lock)(struct page *page);
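The rmap.h hits above lay out the core lifetime and locking rules: every anon_vma carries a refcount, and every lock operation funnels through the root anon_vma's rwsem. Below is a minimal userspace sketch of that pattern; the anon_vma_model type and the pthread/stdatomic stand-ins are illustrative assumptions, not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct anon_vma_model {
	struct anon_vma_model *root;	/* all locking funnels through root */
	pthread_rwlock_t rwsem;		/* stands in for root->rwsem */
	atomic_int refcount;
};

static void get_anon_vma_model(struct anon_vma_model *av)
{
	atomic_fetch_add(&av->refcount, 1);
}

static void put_anon_vma_model(struct anon_vma_model *av)
{
	/* free only on the last reference, as put_anon_vma() does */
	if (atomic_fetch_sub(&av->refcount, 1) == 1)
		free(av);
}

static void anon_vma_lock_write_model(struct anon_vma_model *av)
{
	/* note: always the *root's* lock, as in anon_vma_lock_write() */
	pthread_rwlock_wrlock(&av->root->rwsem);
}

static void anon_vma_unlock_write_model(struct anon_vma_model *av)
{
	pthread_rwlock_unlock(&av->root->rwsem);
}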
ksm.h
52 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
53 * it might be faulted into a different anon_vma (or perhaps to a different
54 * offset in the same anon_vma). do_swap_page() cannot do all the locking
55 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
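The ksm.h comment explains why do_swap_page() must copy a swapped-out KSM page rather than reuse it when the fault lands in a different anon_vma. A hedged sketch of that decision, loosely modeled on the root comparison visible in ksm_might_need_to_copy() further below; the *_stub types are invented for illustration.

#include <stdbool.h>

struct av_stub   { struct av_stub *root; };
struct page_stub { struct av_stub *anon_vma; };
struct vma_stub  { struct av_stub *anon_vma; };

static bool needs_private_copy(const struct page_stub *page,
			       const struct vma_stub *vma)
{
	if (!page->anon_vma)
		return false;		/* not an anon/KSM-tracked page */
	/* reuse is safe only within one anon_vma tree; under a foreign
	 * root, do_swap_page() has to hand out a private copy instead */
	return page->anon_vma->root != vma->anon_vma->root;
}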
mm_types.h
55 * it points to an anon_vma object:
283 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
286 * or brk vma (with NULL file) can only be in an anon_vma list.
290 struct anon_vma *anon_vma; /* Serialized by page_table_lock */ member in struct:vm_area_struct
huge_mm.h
146 if (!vma->anon_vma || vma->vm_ops) vma_adjust_trans_huge()
page-flags.h
294 * page->mapping points to its anon_vma, not to a struct address_space;
299 * and then page->mapping points, not to an anon_vma, but to a private
322 * anon_vma, but to that page's node of the stable tree.
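The page-flags.h hits describe the pointer-tagging trick: the low bit of page->mapping distinguishes an anon_vma from a struct address_space. A self-contained userspace sketch of the same encoding, assuming only that the pointee is at least 2-byte aligned; the encode/decode helper names are illustrative.

#include <stdint.h>
#include <stddef.h>

#define PAGE_MAPPING_ANON 1UL		/* same low-bit tag as the kernel */

struct av_stub { int dummy; };

static void *encode_anon(struct av_stub *av)
{
	/* works because the pointee is at least 2-byte aligned */
	return (void *)((uintptr_t)av + PAGE_MAPPING_ANON);
}

static struct av_stub *decode_anon(void *mapping)
{
	uintptr_t m = (uintptr_t)mapping;

	if (!(m & PAGE_MAPPING_ANON))
		return NULL;	/* file page: mapping is an address_space */
	return (struct av_stub *)(m - PAGE_MAPPING_ANON);
}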
mmu_notifier.h
180 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
mm.h
25 struct anon_vma;
957 extern struct anon_vma *page_anon_vma(struct page *page);
1828 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1830 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
/linux-4.1.27/mm/
rmap.c
27 * anon_vma->rwsem
40 * anon_vma->rwsem, mapping->i_mutex (memory_failure, collect_procs_anon)
68 static inline struct anon_vma *anon_vma_alloc(void) anon_vma_alloc()
70 struct anon_vma *anon_vma; anon_vma_alloc() local
72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); anon_vma_alloc()
73 if (anon_vma) { anon_vma_alloc()
74 atomic_set(&anon_vma->refcount, 1); anon_vma_alloc()
75 anon_vma->degree = 1; /* Reference for first vma */ anon_vma_alloc()
76 anon_vma->parent = anon_vma; anon_vma_alloc()
78 * Initialise the anon_vma root to point to itself. If called anon_vma_alloc()
79 * from fork, the root will be reset to the parent's anon_vma. anon_vma_alloc()
81 anon_vma->root = anon_vma; anon_vma_alloc()
84 return anon_vma; anon_vma_alloc()
87 static inline void anon_vma_free(struct anon_vma *anon_vma) anon_vma_free() argument
89 VM_BUG_ON(atomic_read(&anon_vma->refcount)); anon_vma_free()
93 * we can safely hold the lock without the anon_vma getting anon_vma_free()
109 if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_free()
110 anon_vma_lock_write(anon_vma); anon_vma_free()
111 anon_vma_unlock_write(anon_vma); anon_vma_free()
114 kmem_cache_free(anon_vma_cachep, anon_vma); anon_vma_free()
129 struct anon_vma *anon_vma) anon_vma_chain_link()
132 avc->anon_vma = anon_vma; anon_vma_chain_link()
134 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); anon_vma_chain_link()
138 * anon_vma_prepare - attach an anon_vma to a memory region
142 * an 'anon_vma' attached to it, so that we can associate the
143 * anonymous pages mapped into it with that anon_vma.
147 * can re-use the anon_vma from (very common when the only
152 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
155 * anon_vma isn't actually destroyed).
157 * As a result, we need to do proper anon_vma locking even
160 * an anon_vma.
166 struct anon_vma *anon_vma = vma->anon_vma; anon_vma_prepare() local
170 if (unlikely(!anon_vma)) { anon_vma_prepare()
172 struct anon_vma *allocated; anon_vma_prepare()
178 anon_vma = find_mergeable_anon_vma(vma); anon_vma_prepare()
180 if (!anon_vma) { anon_vma_prepare()
181 anon_vma = anon_vma_alloc(); anon_vma_prepare()
182 if (unlikely(!anon_vma)) anon_vma_prepare()
184 allocated = anon_vma; anon_vma_prepare()
187 anon_vma_lock_write(anon_vma); anon_vma_prepare()
190 if (likely(!vma->anon_vma)) { anon_vma_prepare()
191 vma->anon_vma = anon_vma; anon_vma_prepare()
192 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_prepare()
194 anon_vma->degree++; anon_vma_prepare()
199 anon_vma_unlock_write(anon_vma); anon_vma_prepare()
215 * This is a useful helper function for locking the anon_vma root as
216 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
219 * Such anon_vmas should have the same root, so you'd expect to see
222 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) lock_anon_vma_root() argument
224 struct anon_vma *new_root = anon_vma->root; lock_anon_vma_root()
234 static inline void unlock_anon_vma_root(struct anon_vma *root) unlock_anon_vma_root()
244 * If dst->anon_vma is NULL this function tries to find and reuse an existing
245 * anon_vma which has no vmas and only one child anon_vma. This prevents
246 * degradation of the anon_vma hierarchy into an endless linear chain in the
247 * case of a constantly forking task. On the other hand, an anon_vma with more than one
255 struct anon_vma *root = NULL; anon_vma_clone()
258 struct anon_vma *anon_vma; anon_vma_clone() local
268 anon_vma = pavc->anon_vma; anon_vma_clone()
269 root = lock_anon_vma_root(root, anon_vma); anon_vma_clone()
270 anon_vma_chain_link(dst, avc, anon_vma); anon_vma_clone()
273 * Reuse an existing anon_vma if its degree is lower than two, anon_vma_clone()
274 * which means it has no vma and only one anon_vma child. anon_vma_clone()
276 * Do not choose the parent anon_vma, otherwise the first child anon_vma_clone()
277 * will always reuse it. The root anon_vma is never reused: anon_vma_clone()
280 if (!dst->anon_vma && anon_vma != src->anon_vma && anon_vma_clone()
281 anon_vma->degree < 2) anon_vma_clone()
282 dst->anon_vma = anon_vma; anon_vma_clone()
284 if (dst->anon_vma) anon_vma_clone()
285 dst->anon_vma->degree++; anon_vma_clone()
291 * dst->anon_vma is dropped here otherwise its degree can be incorrectly anon_vma_clone()
294 * about dst->anon_vma if anon_vma_clone() failed. anon_vma_clone()
296 dst->anon_vma = NULL; anon_vma_clone()
302 * Attach vma to its own anon_vma, as well as to the anon_vmas that
309 struct anon_vma *anon_vma; anon_vma_fork() local
312 /* Don't bother if the parent process has no anon_vma here. */ anon_vma_fork()
313 if (!pvma->anon_vma) anon_vma_fork()
316 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ anon_vma_fork()
317 vma->anon_vma = NULL; anon_vma_fork()
327 /* An existing anon_vma has been reused, all done then. */ anon_vma_fork()
328 if (vma->anon_vma) anon_vma_fork()
331 /* Then add our own anon_vma. */ anon_vma_fork()
332 anon_vma = anon_vma_alloc(); anon_vma_fork()
333 if (!anon_vma) anon_vma_fork()
340 * The root anon_vma's rwsem is the lock actually used when we anon_vma_fork()
341 * lock any of the anon_vmas in this anon_vma tree. anon_vma_fork()
343 anon_vma->root = pvma->anon_vma->root; anon_vma_fork()
344 anon_vma->parent = pvma->anon_vma; anon_vma_fork()
346 * With refcounts, an anon_vma can stay around longer than the anon_vma_fork()
347 * process it belongs to. The root anon_vma needs to be pinned until anon_vma_fork()
348 * this anon_vma is freed, because the lock lives in the root. anon_vma_fork()
350 get_anon_vma(anon_vma->root); anon_vma_fork()
351 /* Mark this anon_vma as the one where our new (COWed) pages go. */ anon_vma_fork()
352 vma->anon_vma = anon_vma; anon_vma_fork()
353 anon_vma_lock_write(anon_vma); anon_vma_fork()
354 anon_vma_chain_link(vma, avc, anon_vma); anon_vma_fork()
355 anon_vma->parent->degree++; anon_vma_fork()
356 anon_vma_unlock_write(anon_vma); anon_vma_fork()
361 put_anon_vma(anon_vma); anon_vma_fork()
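The anon_vma_fork() hits above show how a child's new anon_vma is wired into the parent's tree: a shared root (and therefore a shared rwsem), a parent back-pointer, a pinned root, and a bumped parent degree. A simplified single-threaded sketch, with plain ints standing in for the kernel's atomic refcount:

struct anon_vma_sketch {
	struct anon_vma_sketch *root;	/* one rwsem for the whole tree */
	struct anon_vma_sketch *parent;
	int refcount;			/* kernel uses atomic_t */
	int degree;
};

static void fork_link(struct anon_vma_sketch *child,
		      struct anon_vma_sketch *parent_av)
{
	child->root = parent_av->root;	/* share the tree's (root) lock */
	child->parent = parent_av;
	child->root->refcount++;	/* pin root: get_anon_vma(root) */
	parent_av->degree++;		/* parent gained a child anon_vma */
}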
370 struct anon_vma *root = NULL; unlink_anon_vmas()
373 * Unlink each anon_vma chained to the VMA. This list is ordered unlink_anon_vmas()
374 * from newest to oldest, ensuring the root anon_vma gets freed last. unlink_anon_vmas()
377 struct anon_vma *anon_vma = avc->anon_vma; unlink_anon_vmas() local
379 root = lock_anon_vma_root(root, anon_vma); unlink_anon_vmas()
380 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); unlink_anon_vmas()
386 if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { unlink_anon_vmas()
387 anon_vma->parent->degree--; unlink_anon_vmas()
394 if (vma->anon_vma) unlink_anon_vmas()
395 vma->anon_vma->degree--; unlink_anon_vmas()
401 * needing to write-acquire the anon_vma->root->rwsem. unlink_anon_vmas()
404 struct anon_vma *anon_vma = avc->anon_vma; unlink_anon_vmas() local
406 BUG_ON(anon_vma->degree); unlink_anon_vmas()
407 put_anon_vma(anon_vma); unlink_anon_vmas()
416 struct anon_vma *anon_vma = data; anon_vma_ctor() local
418 init_rwsem(&anon_vma->rwsem); anon_vma_ctor()
419 atomic_set(&anon_vma->refcount, 0); anon_vma_ctor()
420 anon_vma->rb_root = RB_ROOT; anon_vma_ctor()
425 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), anon_vma_init()
431 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
434 * the best this function can do is return a locked anon_vma that might
437 * The page might have been remapped to a different anon_vma or the anon_vma
440 * In case it was remapped to a different anon_vma, the new anon_vma will be a
441 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
442 * ensure that any anon_vma obtained from the page will still be valid for as
445 * All users of this function must be very careful when walking the anon_vma
449 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
450 * that the anon_vma pointer from page->mapping is valid if there is a
451 * mapcount, we can dereference the anon_vma after observing those.
453 struct anon_vma *page_get_anon_vma(struct page *page) page_get_anon_vma()
455 struct anon_vma *anon_vma = NULL; page_get_anon_vma() local
465 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); page_get_anon_vma()
466 if (!atomic_inc_not_zero(&anon_vma->refcount)) { page_get_anon_vma()
467 anon_vma = NULL; page_get_anon_vma()
472 * If this page is still mapped, then its anon_vma cannot have been page_get_anon_vma()
474 * anon_vma structure being freed and reused (for another anon_vma: page_get_anon_vma()
480 put_anon_vma(anon_vma); page_get_anon_vma()
486 return anon_vma; page_get_anon_vma()
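page_get_anon_vma() relies on DESTROY_BY_RCU plus atomic_inc_not_zero(): a reference is taken only if the object is not already on its way to being freed. A userspace model of inc-not-zero as a CAS loop; the RCU grace period is assumed rather than modeled:

#include <stdatomic.h>
#include <stdbool.h>

static bool inc_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* on failure the CAS reloads 'old' and we retry */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object is already being freed */
}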
490 * Similar to page_get_anon_vma() except it locks the anon_vma.
496 struct anon_vma *page_lock_anon_vma_read(struct page *page) page_lock_anon_vma_read()
498 struct anon_vma *anon_vma = NULL; page_lock_anon_vma_read() local
499 struct anon_vma *root_anon_vma; page_lock_anon_vma_read()
509 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); page_lock_anon_vma_read()
510 root_anon_vma = READ_ONCE(anon_vma->root); page_lock_anon_vma_read()
513 * If the page is still mapped, then this anon_vma is still page_lock_anon_vma_read()
514 * its anon_vma, and holding the rwsem ensures that it will page_lock_anon_vma_read()
519 anon_vma = NULL; page_lock_anon_vma_read()
525 if (!atomic_inc_not_zero(&anon_vma->refcount)) { page_lock_anon_vma_read()
526 anon_vma = NULL; page_lock_anon_vma_read()
532 put_anon_vma(anon_vma); page_lock_anon_vma_read()
536 /* we pinned the anon_vma, it's safe to sleep */ page_lock_anon_vma_read()
538 anon_vma_lock_read(anon_vma); page_lock_anon_vma_read()
540 if (atomic_dec_and_test(&anon_vma->refcount)) { page_lock_anon_vma_read()
546 anon_vma_unlock_read(anon_vma); page_lock_anon_vma_read()
547 __put_anon_vma(anon_vma); page_lock_anon_vma_read()
548 anon_vma = NULL; page_lock_anon_vma_read()
551 return anon_vma; page_lock_anon_vma_read()
555 return anon_vma; page_lock_anon_vma_read()
558 void page_unlock_anon_vma_read(struct anon_vma *anon_vma) page_unlock_anon_vma_read() argument
560 anon_vma_unlock_read(anon_vma); page_unlock_anon_vma_read()
592 struct anon_vma *page__anon_vma = page_anon_vma(page); page_address_in_vma()
595 * check, and needs it to match anon_vma when KSM is active. page_address_in_vma()
597 if (!vma->anon_vma || !page__anon_vma || page_address_in_vma()
598 vma->anon_vma->root != page__anon_vma->root) page_address_in_vma()
629 * without holding anon_vma lock for write. So when looking for a mm_find_pmd()
933 * page_move_anon_rmap - move a page to our anon_vma
934 * @page: the page to move to our anon_vma
939 * that page can be moved into the anon_vma that belongs to just that
946 struct anon_vma *anon_vma = vma->anon_vma; page_move_anon_rmap() local
949 VM_BUG_ON_VMA(!anon_vma, vma); page_move_anon_rmap()
952 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page_move_anon_rmap()
953 page->mapping = (struct address_space *) anon_vma; page_move_anon_rmap()
966 struct anon_vma *anon_vma = vma->anon_vma; __page_set_anon_rmap() local
968 BUG_ON(!anon_vma); __page_set_anon_rmap()
975 * we must use the _oldest_ possible anon_vma for the __page_set_anon_rmap()
979 anon_vma = anon_vma->root; __page_set_anon_rmap()
981 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; __page_set_anon_rmap()
982 page->mapping = (struct address_space *) anon_vma; __page_set_anon_rmap()
1008 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); __page_check_anon_rmap()
1020 * the anon_vma case: to serialize mapping,index checking after setting,
1305 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem. try_to_unmap_one()
1372 * The VMA is moved under the anon_vma lock but not the try_to_unmap()
1420 void __put_anon_vma(struct anon_vma *anon_vma) __put_anon_vma() argument
1422 struct anon_vma *root = anon_vma->root; __put_anon_vma()
1424 anon_vma_free(anon_vma); __put_anon_vma()
1425 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) __put_anon_vma()
1429 static struct anon_vma *rmap_walk_anon_lock(struct page *page, rmap_walk_anon_lock()
1432 struct anon_vma *anon_vma; rmap_walk_anon_lock() local
1441 * take a reference count to prevent the anon_vma disappearing rmap_walk_anon_lock()
1443 anon_vma = page_anon_vma(page); rmap_walk_anon_lock()
1444 if (!anon_vma) rmap_walk_anon_lock()
1447 anon_vma_lock_read(anon_vma); rmap_walk_anon_lock()
1448 return anon_vma; rmap_walk_anon_lock()
1458 * contained in the anon_vma struct it points to.
1467 struct anon_vma *anon_vma; rmap_walk_anon() local
1472 anon_vma = rmap_walk_anon_lock(page, rwc); rmap_walk_anon()
1473 if (!anon_vma) rmap_walk_anon()
1477 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { rmap_walk_anon()
1490 anon_vma_unlock_read(anon_vma); rmap_walk_anon()
1564 struct anon_vma *anon_vma = vma->anon_vma; __hugepage_set_anon_rmap() local
1566 BUG_ON(!anon_vma); __hugepage_set_anon_rmap()
1571 anon_vma = anon_vma->root; __hugepage_set_anon_rmap()
1573 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; __hugepage_set_anon_rmap()
1574 page->mapping = (struct address_space *) anon_vma; __hugepage_set_anon_rmap()
1581 struct anon_vma *anon_vma = vma->anon_vma; hugepage_add_anon_rmap() local
1585 BUG_ON(!anon_vma); hugepage_add_anon_rmap()
127 anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) anon_vma_chain_link() argument
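The anon_vma_clone() comments above describe the degree-based reuse heuristic that keeps a constantly forking task from growing an endless linear anon_vma chain. A hedged sketch of just that decision; maybe_reuse() and the sketch type are illustrative names:

struct anon_vma_sketch { int degree; };

static struct anon_vma_sketch *
maybe_reuse(struct anon_vma_sketch *candidate,
	    struct anon_vma_sketch *src_av,
	    struct anon_vma_sketch *dst_av)
{
	/* adopt only an anon_vma with no vmas and a single child
	 * (degree < 2), and never the parent's own anon_vma */
	if (!dst_av && candidate != src_av && candidate->degree < 2)
		return candidate;	/* becomes dst->anon_vma */
	return dst_av;
}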
mmap.c
443 struct anon_vma *anon_vma = vma->anon_vma; validate_mm() local
446 if (anon_vma) { validate_mm()
447 anon_vma_lock_read(anon_vma); validate_mm()
450 anon_vma_unlock_read(anon_vma); validate_mm()
522 * vma has some anon_vma assigned, and is already inserted on that
523 * anon_vma's interval trees.
526 * vma must be removed from the anon_vma's interval trees using
533 * the root anon_vma's rwsem.
541 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); anon_vma_interval_tree_pre_update_vma()
550 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); anon_vma_interval_tree_post_update_vma()
734 struct anon_vma *anon_vma = NULL; vma_adjust() local
773 * make sure the expanding vma has anon_vma set if the vma_adjust()
776 if (exporter && exporter->anon_vma && !importer->anon_vma) { vma_adjust()
779 importer->anon_vma = exporter->anon_vma; vma_adjust()
808 anon_vma = vma->anon_vma; vma_adjust()
809 if (!anon_vma && adjust_next) vma_adjust()
810 anon_vma = next->anon_vma; vma_adjust()
811 if (anon_vma) { vma_adjust()
812 VM_BUG_ON_VMA(adjust_next && next->anon_vma && vma_adjust()
813 anon_vma != next->anon_vma, next); vma_adjust()
814 anon_vma_lock_write(anon_vma); vma_adjust()
874 if (anon_vma) { vma_adjust()
878 anon_vma_unlock_write(anon_vma); vma_adjust()
895 if (next->anon_vma) vma_adjust()
945 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1, is_mergeable_anon_vma()
946 struct anon_vma *anon_vma2, is_mergeable_anon_vma()
951 * parents. This can improve scalability by reducing anon_vma lock contention. is_mergeable_anon_vma()
960 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
964 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
972 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) can_vma_merge_before()
975 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_before()
983 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
987 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
991 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) can_vma_merge_after()
994 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { can_vma_merge_after()
1035 struct anon_vma *anon_vma, struct file *file, vma_merge()
1063 anon_vma, file, pgoff)) { vma_merge()
1070 anon_vma, file, pgoff+pglen) && vma_merge()
1071 is_mergeable_anon_vma(prev->anon_vma, vma_merge()
1072 next->anon_vma, NULL)) { vma_merge()
1091 anon_vma, file, pgoff+pglen)) { vma_merge()
1109 * at sharing an anon_vma.
1114 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1118 * really matter for the anon_vma sharing case.
1130 * Do some basic sanity checking to see if we can re-use the anon_vma
1133 * to share the anon_vma.
1136 * the anon_vma of 'old' is concurrently in the process of being set up
1143 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1144 * is to return an anon_vma that is "complex" due to having gone through
1151 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) reusable_anon_vma()
1154 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); reusable_anon_vma() local
1156 if (anon_vma && list_is_singular(&old->anon_vma_chain)) reusable_anon_vma()
1157 return anon_vma; reusable_anon_vma()
1164 * neighbouring vmas for a suitable anon_vma, before it goes off
1165 * to allocate a new anon_vma. It checks because a repetitive
1170 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) find_mergeable_anon_vma()
1172 struct anon_vma *anon_vma; find_mergeable_anon_vma() local
1179 anon_vma = reusable_anon_vma(near, vma, near); find_mergeable_anon_vma()
1180 if (anon_vma) find_mergeable_anon_vma()
1181 return anon_vma; find_mergeable_anon_vma()
1187 anon_vma = reusable_anon_vma(near, near, vma); find_mergeable_anon_vma()
1188 if (anon_vma) find_mergeable_anon_vma()
1189 return anon_vma; find_mergeable_anon_vma()
1195 * or lead to too many vmas hanging off the same anon_vma. find_mergeable_anon_vma()
1372 * Set pgoff according to addr for anon_vma. do_mmap_pgoff()
2159 /* We must make sure the anon_vma is allocated. */ expand_upwards()
2166 * anon_vma lock to serialize against concurrent expand_stacks. expand_upwards()
2168 anon_vma_lock_write(vma->anon_vma); expand_upwards()
2206 anon_vma_unlock_write(vma->anon_vma); expand_upwards()
2226 /* We must make sure the anon_vma is allocated. */ expand_downwards()
2233 * anon_vma lock to serialize against concurrent expand_stacks. expand_downwards()
2235 anon_vma_lock_write(vma->anon_vma); expand_downwards()
2271 anon_vma_unlock_write(vma->anon_vma); expand_downwards()
2883 * until its first write fault, when page's anon_vma and index insert_vm_struct()
2894 BUG_ON(vma->anon_vma); insert_vm_struct()
2927 if (unlikely(!vma->vm_file && !vma->anon_vma)) { copy_vma()
2935 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); copy_vma()
3132 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) vm_lock_anon_vma() argument
3134 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { vm_lock_anon_vma()
3139 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); vm_lock_anon_vma()
3142 * anon_vma->root->rwsem. If some other vma in this mm shares vm_lock_anon_vma()
3143 * the same anon_vma we won't take it again. vm_lock_anon_vma()
3147 * anon_vma->root->rwsem. vm_lock_anon_vma()
3150 &anon_vma->root->rb_root.rb_node)) vm_lock_anon_vma()
3190 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3192 * vma in this mm is backed by the same anon_vma or address_space.
3195 * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
3223 if (vma->anon_vma) mm_take_all_locks()
3225 vm_lock_anon_vma(mm, avc->anon_vma); mm_take_all_locks()
3235 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) vm_unlock_anon_vma() argument
3237 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) { vm_unlock_anon_vma()
3243 * the vma so the users using the anon_vma->rb_root will vm_unlock_anon_vma()
3248 * anon_vma->root->rwsem. vm_unlock_anon_vma()
3251 &anon_vma->root->rb_root.rb_node)) vm_unlock_anon_vma()
3253 anon_vma_unlock_write(anon_vma); vm_unlock_anon_vma()
3284 if (vma->anon_vma) mm_drop_all_locks()
3286 vm_unlock_anon_vma(avc->anon_vma); mm_drop_all_locks()
971 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) can_vma_merge_before() argument
990 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) can_vma_merge_after() argument
1032 vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy) vma_merge() argument
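reusable_anon_vma() (hits above) only borrows a neighbour's anon_vma when that vma's anon_vma_chain is singular, i.e. the anon_vma never picked up fork-time complexity. A self-contained sketch of the singular-list check, with a minimal doubly-linked-list stand-in for the kernel's list_head:

#include <stdbool.h>
#include <stddef.h>

struct list_stub { struct list_stub *next, *prev; };
struct vma_stub {
	struct list_stub anon_vma_chain;
	void *anon_vma;
};

static void *reusable_anon_vma_sketch(struct vma_stub *old)
{
	/* list_is_singular(): non-empty and first element == last */
	bool singular = old->anon_vma_chain.next != &old->anon_vma_chain &&
			old->anon_vma_chain.next == old->anon_vma_chain.prev;

	return (old->anon_vma && singular) ? old->anon_vma : NULL;
}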
mremap.c
94 struct anon_vma *anon_vma = NULL; move_ptes() local
100 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma move_ptes()
122 if (vma->anon_vma) { move_ptes()
123 anon_vma = vma->anon_vma; move_ptes()
124 anon_vma_lock_write(anon_vma); move_ptes()
154 if (anon_vma) move_ptes()
155 anon_vma_unlock_write(anon_vma); move_ptes()
196 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, move_page_tables()
200 anon_vma_lock_write(vma->anon_vma); move_page_tables()
205 anon_vma_unlock_write(vma->anon_vma); move_page_tables()
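move_ptes() (above) takes the anon_vma write lock only when the caller indicates a concurrent rmap walk could observe the page tables mid-move. A minimal sketch of that conditional locking, using a pthread rwlock as a stand-in for anon_vma->root->rwsem:

#include <pthread.h>
#include <stdbool.h>

static void move_ptes_sketch(pthread_rwlock_t *anon_vma_rwsem,
			     bool need_rmap_locks)
{
	if (need_rmap_locks)
		pthread_rwlock_wrlock(anon_vma_rwsem);

	/* ... move the page table entries from the old range ... */

	if (need_rmap_locks)
		pthread_rwlock_unlock(anon_vma_rwsem);
}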
huge_memory.c
896 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ copy_huge_pmd()
1095 VM_BUG_ON_VMA(!vma->anon_vma, vma); do_huge_pmd_wp_page()
1263 struct anon_vma *anon_vma = NULL; do_huge_pmd_numa_page() local
1327 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma do_huge_pmd_numa_page()
1332 anon_vma = page_lock_anon_vma_read(page); do_huge_pmd_numa_page()
1344 if (unlikely(!anon_vma)) { do_huge_pmd_numa_page()
1378 if (anon_vma) do_huge_pmd_numa_page()
1379 page_unlock_anon_vma_read(anon_vma); do_huge_pmd_numa_page()
1536 wait_split_huge_page(vma->anon_vma, pmd); __pmd_trans_huge_lock()
1622 * and it won't wait on the anon_vma->root->rwsem to __split_huge_page_splitting()
1826 /* must be called with anon_vma->root->rwsem held */ __split_huge_page()
1828 struct anon_vma *anon_vma, __split_huge_page()
1839 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { __split_huge_page()
1847 * anon_vma list. This guarantees that if copy_huge_pmd() runs __split_huge_page()
1864 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { __split_huge_page()
1886 struct anon_vma *anon_vma; split_huge_page_to_list() local
1894 * the anon_vma disappearing so we first take a reference to it split_huge_page_to_list()
1895 * and then lock the anon_vma for write. This is similar to split_huge_page_to_list()
1899 anon_vma = page_get_anon_vma(page); split_huge_page_to_list()
1900 if (!anon_vma) split_huge_page_to_list()
1902 anon_vma_lock_write(anon_vma); split_huge_page_to_list()
1909 __split_huge_page(page, anon_vma, list); split_huge_page_to_list()
1914 anon_vma_unlock_write(anon_vma); split_huge_page_to_list()
1915 put_anon_vma(anon_vma); split_huge_page_to_list()
2058 if (!vma->anon_vma) khugepaged_enter_vma_merge()
2419 if (!vma->anon_vma || vma->vm_ops) hugepage_vma_check()
2462 * handled by the anon_vma lock + PG_lock. collapse_huge_page()
2481 anon_vma_lock_write(vma->anon_vma); collapse_huge_page()
2515 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2520 * All pages are isolated and locked so anon_vma rmap collapse_huge_page()
2523 anon_vma_unlock_write(vma->anon_vma); collapse_huge_page()
2923 * huge page before our split_huge_page() got the anon_vma lock. __split_huge_page_pmd()
1827 __split_huge_page(struct page *page, struct anon_vma *anon_vma, struct list_head *list) __split_huge_page() argument
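split_huge_page_to_list() (hits above) pins the anon_vma before write-locking it, because locking an unpinned anon_vma races with the final put freeing it. A userspace sketch of that pin-then-lock bracketing; av_stub is illustrative, the root lock is folded into the object for brevity, and the split body is elided:

#include <pthread.h>
#include <stdatomic.h>

struct av_stub { atomic_int refcount; pthread_rwlock_t rwsem; };

static int split_sketch(struct av_stub *av)
{
	int old = atomic_load(&av->refcount);

	/* pin first: write-locking an unpinned anon_vma races with free */
	do {
		if (old == 0)
			return -1;	/* already dying, nothing to do */
	} while (!atomic_compare_exchange_weak(&av->refcount, &old, old + 1));

	pthread_rwlock_wrlock(&av->rwsem);
	/* ... __split_huge_page() work would happen here ... */
	pthread_rwlock_unlock(&av->rwsem);

	if (atomic_fetch_sub(&av->refcount, 1) == 1)
		;	/* last reference: __put_anon_vma() would free */
	return 0;
}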
debug.c
158 "prot %lx anon_vma %p vm_ops %p\n" dump_vma()
163 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, dump_vma()
ksm.c
152 * @anon_vma: pointer to anon_vma for this mm, address, when in stable tree
164 struct anon_vma *anon_vma; /* when stable */ member in union:rmap_item::__anon14056
420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) find_mergeable_vma()
433 * to undo, we also need to drop a reference to the anon_vma. break_cow()
435 put_anon_vma(rmap_item->anon_vma); break_cow()
504 put_anon_vma(rmap_item->anon_vma); remove_node_from_stable_tree()
633 put_anon_vma(rmap_item->anon_vma); remove_rmap_item_from_tree()
786 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) unmerge_and_remove_all_rmap_items()
1051 * PageAnon+anon_vma to PageKsm+NULL stable_node: try_to_merge_one_page()
1100 /* Unstable nid is in union with stable anon_vma: remove first */ try_to_merge_with_ksm_page()
1103 /* Must get reference to anon_vma while still holding mmap_sem */ try_to_merge_with_ksm_page()
1104 rmap_item->anon_vma = vma->anon_vma; try_to_merge_with_ksm_page()
1105 get_anon_vma(vma->anon_vma); try_to_merge_with_ksm_page()
1611 if (!vma->anon_vma) scan_get_next_rmap_item()
1772 if (vma->anon_vma) { ksm_madvise()
1865 struct anon_vma *anon_vma = page_anon_vma(page); ksm_might_need_to_copy() local
1872 } else if (!anon_vma) { ksm_might_need_to_copy()
1874 } else if (anon_vma->root == vma->anon_vma->root && ksm_might_need_to_copy()
1913 struct anon_vma *anon_vma = rmap_item->anon_vma; rmap_walk_ksm() local
1917 anon_vma_lock_read(anon_vma); rmap_walk_ksm()
1918 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, rmap_walk_ksm()
1939 anon_vma_unlock_read(anon_vma); rmap_walk_ksm()
1943 anon_vma_unlock_read(anon_vma); rmap_walk_ksm()
1947 anon_vma_unlock_read(anon_vma); rmap_walk_ksm()
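rmap_walk_ksm() (above) can walk under the read lock alone because the stable-tree rmap_item already holds its own anon_vma reference. A hedged outline of that shape; the interval-tree iteration itself is elided:

#include <pthread.h>

struct rmap_item_stub {
	void *anon_vma;			/* reference held since merge time */
	pthread_rwlock_t *root_rwsem;	/* that anon_vma tree's lock */
};

static void rmap_walk_ksm_sketch(struct rmap_item_stub *item,
				 int (*visit)(void *vma))
{
	pthread_rwlock_rdlock(item->root_rwsem);
	/* ... anon_vma_interval_tree_foreach() over the pinned anon_vma,
	 * calling visit() on each overlapping vma, would go here ... */
	(void)visit;
	pthread_rwlock_unlock(item->root_rwsem);
}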
migrate.c
128 * can race mremap's move_ptes(), which skips anon_vma lock. remove_migration_pte()
771 struct anon_vma *anon_vma = NULL; __unmap_and_move() local
813 * we cannot notice that anon_vma is freed while we migrate a page. __unmap_and_move()
814 * This get_anon_vma() delays freeing the anon_vma until the end __unmap_and_move()
822 * getting a hold on an anon_vma from outside one of its mms. __unmap_and_move()
824 anon_vma = page_get_anon_vma(page); __unmap_and_move()
825 if (anon_vma) { __unmap_and_move()
831 * We cannot be sure that the anon_vma of an unmapped __unmap_and_move()
835 * data have been freed, then the anon_vma could __unmap_and_move()
894 /* Drop an anon_vma reference if we took one */ __unmap_and_move()
895 if (anon_vma) __unmap_and_move()
896 put_anon_vma(anon_vma); __unmap_and_move()
1010 struct anon_vma *anon_vma = NULL; unmap_and_move_huge_page() local
1037 anon_vma = page_get_anon_vma(hpage); unmap_and_move_huge_page()
1051 if (anon_vma) unmap_and_move_huge_page()
1052 put_anon_vma(anon_vma); unmap_and_move_huge_page()
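The migrate.c hits above explain that the reference taken via page_get_anon_vma() may be the only thing keeping the anon_vma alive once the page is unmapped. A minimal sketch of the take-work-put bracketing in __unmap_and_move(); the plain refcount increment stands in for the inc-not-zero lookup in page_get_anon_vma():

#include <stdatomic.h>

struct av_stub { atomic_int refcount; };

static void unmap_and_move_sketch(struct av_stub *av)
{
	/* stands in for page_get_anon_vma(): pin before unmapping */
	if (av)
		atomic_fetch_add(&av->refcount, 1);

	/* ... try_to_unmap(), move the page, remove_migration_ptes() ... */

	/* drop the pin only after the rmap walks are finished */
	if (av)
		atomic_fetch_sub(&av->refcount, 1);	/* put_anon_vma() */
}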
util.c
338 /* Neutral page->mapping pointer to address_space or anon_vma or other */ page_rmapping()
345 struct anon_vma *page_anon_vma(struct page *page) page_anon_vma()
mprotect.c
293 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); mprotect_fixup()
madvise.c
104 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, madvise_behavior()
mlock.c
512 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, mlock_fixup()
memory.c
599 wait_split_huge_page(vma->anon_vma, pmd); __pte_alloc()
688 "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", print_bad_pte()
689 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); print_bad_pte()
1024 !vma->anon_vma) copy_page_range()
2330 * The page is all ours. Move it to our anon_vma so __releases()
swapfile.c
1270 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) unuse_mm()
1502 * in an anon_vma, once the anon_vma has been determined, try_to_unuse()
filemap.c
88 * ->anon_vma.lock (vma_adjust)
90 * ->anon_vma.lock
gup.c
198 wait_split_huge_page(vma->anon_vma, pmd); follow_page_mask()
memory-failure.c
419 struct anon_vma *av; collect_procs_anon()
mempolicy.c
725 vma->anon_vma, vma->vm_file, pgoff, mbind_range()
hugetlb.c
3003 * anon_vma prepared. hugetlb_cow()
/linux-4.1.27/arch/x86/mm/
mpx.c
57 /* Set pgoff according to addr for anon_vma */ mpx_mmap()
/linux-4.1.27/fs/
binfmt_elf_fdpic.c
1226 if (!vma->anon_vma) { maydump()
binfmt_elf.c
1261 if (vma->anon_vma && FILTER(ANON_PRIVATE)) vma_dump_size()
/linux-4.1.27/security/selinux/
hooks.c
3368 } else if (vma->vm_file && vma->anon_vma) { selinux_file_mprotect()
