root/include/linux/cpuset.h

DEFINITIONS

This source file includes the following definitions.
  1. cpusets_enabled
  2. cpuset_inc
  3. cpuset_dec
  4. cpuset_node_allowed
  5. __cpuset_zone_allowed
  6. cpuset_zone_allowed
  7. cpuset_do_page_mem_spread
  8. cpuset_do_slab_mem_spread
  9. read_mems_allowed_begin
  10. read_mems_allowed_retry
  11. set_mems_allowed
  12. cpusets_enabled
  13. cpuset_init
  14. cpuset_init_smp
  15. cpuset_force_rebuild
  16. cpuset_update_active_cpus
  17. cpuset_wait_for_hotplug
  18. cpuset_read_lock
  19. cpuset_read_unlock
  20. cpuset_cpus_allowed
  21. cpuset_cpus_allowed_fallback
  22. cpuset_mems_allowed
  23. cpuset_init_current_mems_allowed
  24. cpuset_nodemask_valid_mems_allowed
  25. cpuset_node_allowed
  26. __cpuset_zone_allowed
  27. cpuset_zone_allowed
  28. cpuset_mems_allowed_intersects
  29. cpuset_memory_pressure_bump
  30. cpuset_task_status_allowed
  31. cpuset_mem_spread_node
  32. cpuset_slab_spread_node
  33. cpuset_do_page_mem_spread
  34. cpuset_do_slab_mem_spread
  35. current_cpuset_is_being_rebound
  36. rebuild_sched_domains
  37. cpuset_print_current_mems_allowed
  38. set_mems_allowed
  39. read_mems_allowed_begin
  40. read_mems_allowed_retry

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
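
/*
 * Illustrative timeline (an editorial sketch, not kernel code) of the hazard
 * described in the comment above, assuming local irqs are disabled around a
 * read_mems_allowed_begin()/read_mems_allowed_retry() loop and the keys were
 * (wrongly) patched in the disabled -> enabled direction with
 * cpusets_enabled_key first:
 *
 *   1. read_mems_allowed_retry() already checks the live mems_allowed
 *      seqcount (cpusets_enabled_key is patched);
 *   2. read_mems_allowed_begin() still returns 0
 *      (cpusets_pre_enable_key is not patched yet);
 *   3. if the task's seqcount is non-zero, retry() keeps returning true
 *      and, with irqs off, the loop never observes a stable sequence.
 *
 * cpuset_inc() therefore patches cpusets_pre_enable_key first, and
 * cpuset_dec() unpatches the keys in the reverse order.
 */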

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_node_allowed(node, gfp_mask);
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}
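
/*
 * Illustrative sketch only (not part of this header): how a zone scan might
 * gate candidate zones on cpuset_zone_allowed().  Because the wrappers above
 * are guarded by the cpusets_enabled() static branch, the check costs next
 * to nothing while cpusets are not in use.  for_each_zone() is the real
 * iterator; try_alloc_from_zone() is a hypothetical helper.
 */
#if 0
static inline struct page *example_pick_allowed_zone(gfp_t gfp_mask)
{
        struct zone *zone;
        struct page *page;

        for_each_zone(zone) {
                if (!cpuset_zone_allowed(zone, gfp_mask))
                        continue;
                page = try_alloc_from_zone(zone, gfp_mask);     /* hypothetical */
                if (page)
                        return page;
        }
        return NULL;
}
#endif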

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

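/*
 * Editorial note: cpuset_memory_pressure_bump() tests a plain global flag
 * before calling out of line, so the accounting is essentially free unless
 * an administrator has turned on the cpuset memory-pressure reporting
 * control.
 */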
#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}
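
/*
 * Illustrative sketch only (not part of this header): one way a page-cache
 * allocation site could use the spread helpers above to spread pages over
 * the cpuset's allowed nodes.  alloc_pages() and alloc_pages_node() are real
 * allocator entry points; the wrapper function itself is hypothetical.
 */
#if 0
static inline struct page *example_page_cache_alloc(gfp_t gfp)
{
        if (cpuset_do_page_mem_spread())
                return alloc_pages_node(cpuset_mem_spread_node(), gfp, 0);

        return alloc_pages(gfp, 0);
}
#endif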

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
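
/*
 * Illustrative sketch only: the canonical begin/retry pattern the two helpers
 * above are meant for, roughly the shape used around page allocation.  The
 * try_allocate() helper is hypothetical; the cookie handling is the point.
 */
#if 0
static inline struct page *example_alloc_with_mems_cookie(gfp_t gfp_mask)
{
        unsigned int cookie;
        struct page *page;

        do {
                cookie = read_mems_allowed_begin();
                page = try_allocate(gfp_mask, &cpuset_current_mems_allowed);   /* hypothetical */
        } while (!page && read_mems_allowed_retry(cookie));

        return page;
}
#endif

/*
 * Editorial note: set_mems_allowed() below is the matching write side.
 * Interrupts are disabled across the write_seqcount section, most likely so
 * that a reader running from interrupt context on this CPU cannot interrupt
 * the writer mid-update and then spin forever on an odd sequence count in
 * read_seqcount_begin().
 */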

static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

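/*
 * Editorial note: even with cpusets compiled out, a CPU hotplug event must
 * still rebuild the scheduler domains, so this stub falls back to a plain
 * partition_sched_domains() call (as does rebuild_sched_domains() below).
 */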
static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                                struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */
