This source file includes the following definitions.
- get_anon_vma
- put_anon_vma
- anon_vma_lock_write
- anon_vma_unlock_write
- anon_vma_lock_read
- anon_vma_unlock_read
- anon_vma_prepare
- anon_vma_merge
- page_dup_rmap
- page_vma_mapped_walk_done
- page_referenced
- page_mkclean

#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we are guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for clearing up the
         * anon_vma if they are the last user on release.
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used for making decisions about reusing an
         * existing anon_vma instead of forking a new one. See the
         * comments in anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */

        /* Interval tree of private "related" vmas */
        struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
        TTU_MIGRATION           = 0x1,  /* migration mode */
        TTU_MUNLOCK             = 0x2,  /* munlock mode */

        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
        TTU_IGNORE_ACCESS       = 0x10, /* don't age */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED         = 0x80, /* do not grab rmap lock:
                                         * caller holds it */
        TTU_SPLIT_FREEZE        = 0x100,        /* freeze pte under splitting thp */
};

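/*
 * Illustrative sketch (not part of this header): reclaim-style callers
 * combine these flags when asking for a page to be unmapped, e.g.:
 *
 *      if (!try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH))
 *              ... the page is still mapped somewhere, keep it ...
 */
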
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}
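
/*
 * Illustrative sketch (not part of this header): the rwsem always lives in
 * the root of the anon_vma tree, so a caller may lock through any member
 * anon_vma, pinning it with a reference if the vma itself might go away:
 *
 *      struct anon_vma *anon_vma = vma->anon_vma;
 *
 *      get_anon_vma(anon_vma);
 *      anon_vma_lock_write(anon_vma);
 *      ... add or remove entries in anon_vma->rb_root ...
 *      anon_vma_unlock_write(anon_vma);
 *      put_anon_vma(anon_vma);
 */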

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))
                return 0;

        return __anon_vma_prepare(vma);
}

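/*
 * Illustrative sketch (not part of this header): page fault handlers call
 * anon_vma_prepare() before installing a new anonymous page, e.g.:
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              return VM_FAULT_OOM;
 */
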
static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

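/*
 * Illustrative sketch (not part of this header): an anonymous page fault
 * pairs the rmap insertion with the page table update, roughly:
 *
 *      page_add_new_anon_rmap(page, vma, address, false);
 *      set_pte_at(mm, address, pte, entry);
 *
 * Unmap paths call page_remove_rmap(page, false) after clearing the pte.
 * The bool argument says whether the page is mapped as a compound
 * (transparent huge) page rather than by a single pte.
 */
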
/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                        struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);

/* Avoid racy checks */
#define PVMW_SYNC               (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION          (1 << 1)

struct page_vma_mapped_walk {
        struct page *page;
        struct vm_area_struct *vma;
        unsigned long address;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        unsigned int flags;
};

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->pte)
                pte_unmap(pvmw->pte);
        if (pvmw->ptl)
                spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

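/*
 * Illustrative sketch (not part of this header): callers fill in page, vma
 * and the start address, then iterate over every pte/pmd that maps the
 * page in that vma:
 *
 *      struct page_vma_mapped_walk pvmw = {
 *              .page = page,
 *              .vma = vma,
 *              .address = address,
 *      };
 *
 *      while (page_vma_mapped_walk(&pvmw)) {
 *              ... examine or update *pvmw.pte or *pvmw.pmd ...
 *      }
 *
 * On a normal exit page_vma_mapped_walk() has already released the page
 * table lock; page_vma_mapped_walk_done() is needed only when breaking
 * out of the loop early.
 */
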
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
void try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: used to control the reverse-map walk over all vmas
 * mapping a page.
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where the page is mapped
 * done: checked after each vma to decide whether to stop the walk
 * anon_lock: optimized way to take the anon_vma lock (optional)
 * invalid_vma: used to skip vmas the caller is not interested in
 */
struct rmap_walk_control {
        void *arg;
        /*
         * Return false if page table scanning in rmap_walk should be
         * stopped. Otherwise, return true.
         */
        bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

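/*
 * Illustrative sketch (not part of this header): a caller packages its
 * per-vma callback and private state in a control structure and hands it
 * to rmap_walk(). The callback and argument names here are hypothetical:
 *
 *      struct rmap_walk_control rwc = {
 *              .rmap_one = my_rmap_one,
 *              .arg = &my_state,
 *      };
 *
 *      rmap_walk(page, &rwc);
 *
 * rmap_walk_locked() is for callers that already hold the relevant rmap
 * lock (the anon_vma lock or i_mmap_rwsem).
 */
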
#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) false

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

#endif  /* _LINUX_RMAP_H */