root/include/linux/mempolicy.h

DEFINITIONS

This source file includes the following definitions.
  1. mpol_put
  2. mpol_needs_cond_ref
  3. mpol_cond_put
  4. mpol_dup
  5. mpol_get
  6. mpol_equal
  7. check_highest_zone
  8. vma_migratable
  9. mpol_equal
  10. mpol_put
  11. mpol_cond_put
  12. mpol_get
  13. mpol_shared_policy_init
  14. mpol_free_shared_policy
  15. mpol_shared_policy_lookup
  16. vma_dup_policy
  17. numa_policy_init
  18. numa_default_policy
  19. mpol_rebind_task
  20. mpol_rebind_mm
  21. huge_node
  22. init_nodemask_of_mempolicy
  23. do_migrate_pages
  24. check_highest_zone
  25. mpol_parse_str
  26. mpol_misplaced
  27. mpol_put_task_policy

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
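
/*
 * Illustrative example (a sketch, not text from the kernel sources): an
 * MPOL_INTERLEAVE policy spanning nodes 0 and 1 would carry
 *
 *      .mode    = MPOL_INTERLEAVE
 *      .v.nodes = nodemask containing { 0, 1 }
 *      .w.user_nodemask = the nodemask the user originally passed in,
 *                         kept so the policy can be rebound when the
 *                         allowed nodes change (see mpol_rebind_*() below)
 *
 * while an MPOL_PREFERRED policy would use .v.preferred_node instead.
 */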

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
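
/*
 * Usage sketch (illustrative; variable names are hypothetical): lookups
 * that may return a shared, per-lookup referenced policy are paired with
 * mpol_cond_put() rather than mpol_put(), so that policies which were
 * never referenced for the lookup are not dropped by mistake:
 *
 *      struct mempolicy *pol = __get_vma_policy(vma, addr);
 *      if (!pol)
 *              pol = get_task_policy(current);
 *      ... allocate according to pol ...
 *      mpol_cond_put(pol);
 */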

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
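
/*
 * Usage sketch (illustrative only): __mpol_dup() reports allocation
 * failure as an ERR_PTR(), so callers typically check the result and
 * later balance the new reference with mpol_put():
 *
 *      struct mempolicy *new = mpol_dup(old);
 *      if (IS_ERR(new))
 *              return PTR_ERR(new);
 *      ... install or use new ...
 *      mpol_put(new);
 */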

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}
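
/*
 * Illustrative check (a sketch, not a quote of any particular caller):
 * policy equality matters when deciding whether two adjacent mappings
 * can be treated as one, e.g.
 *
 *      if (mpol_equal(vma_policy(prev), vma_policy(next)))
 *              ... the mappings are interchangeable policy-wise ...
 */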

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
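
/*
 * Usage sketch (illustrative; "sp" and "index" are hypothetical names):
 * an object whose pages are shared by many mappings, such as a shmem
 * inode, keeps one shared_policy and looks policies up by page index:
 *
 *      mpol_shared_policy_init(&sp, NULL);
 *      ...
 *      struct mempolicy *pol = mpol_shared_policy_lookup(&sp, index);
 *      ... allocate the page at 'index' according to pol ...
 *      mpol_cond_put(pol);
 *      ...
 *      mpol_free_shared_policy(&sp);
 *
 * The lookup takes a reference on the shared policy, which is why it is
 * dropped with mpol_cond_put(); mpol_free_shared_policy() is for object
 * teardown.
 */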

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
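
/*
 * Usage sketch for huge_node() below (illustrative; variables are
 * hypothetical): huge page allocators ask which node and nodemask to
 * allocate from, then drop the conditional policy reference once the
 * allocation is done:
 *
 *      struct mempolicy *mpol;
 *      nodemask_t *nodemask;
 *      int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *      ... allocate a huge page on 'nid', limited to 'nodemask' ...
 *      mpol_cond_put(mpol);
 */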

extern int huge_node(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;
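
/*
 * policy_zone is the highest zone type (ZONE_MOVABLE excluded) found while
 * the zonelists are built; check_highest_zone() below is the hook that
 * keeps it up to date. Allocations aimed at zones below policy_zone (for
 * example GFP_DMA on typical configurations) are not constrained by
 * MPOL_BIND nodemasks.
 */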

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}
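
/*
 * do_migrate_pages() below moves the pages of 'mm' that live on the 'from'
 * nodes over to the 'to' nodes. Illustrative call (a sketch, not a quote
 * of any caller), using MPOL_MF_MOVE to move only pages mapped solely by
 * this mm:
 *
 *      nodemask_t from = nodemask_of_node(0);
 *      nodemask_t to   = nodemask_of_node(1);
 *      int ret = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */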

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif
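
/*
 * mpol_parse_str(), declared above under CONFIG_TMPFS, turns a policy
 * string such as the one given to tmpfs's "mpol=" mount option into a
 * mempolicy. Illustrative sketch (hypothetical buffer; the string follows
 * the documented tmpfs format and is modified in place during parsing):
 *
 *      struct mempolicy *mpol;
 *      char str[] = "interleave:0-3";
 *      if (!mpol_parse_str(str, &mpol))
 *              ... mpol now describes interleaving over nodes 0-3 ...
 *
 * A non-zero return value means the string could not be parsed.
 */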

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                return false;

        /*
         * DAX device mappings require predictable access latency, so avoid
         * incurring periodic faults.
         */
        if (vma_is_dax(vma))
                return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        if (vma->vm_flags & VM_HUGETLB)
                return false;
#endif

        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                        return false;
        return true;
}
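
/*
 * Usage sketch (illustrative; the surrounding code is hypothetical):
 * vma_migratable() is the gate callers check before trying to move a
 * mapping's pages between nodes:
 *
 *      if (vma_migratable(vma))
 *              ... queue the vma's pages for migration ...
 *
 * mpol_misplaced(), declared below, is the automatic NUMA-balancing hook:
 * for a page found by a hinting fault it returns the node the page should
 * be moved to, or -1 if the page is already where its policy wants it.
 */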

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif
