#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
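
/*
 * Usage sketch (illustrative only, not part of this interface): a caller
 * whose work depends on mems_allowed samples the seqcount first and redoes
 * the work if a concurrent rebind raced with it.  example_alloc() and its
 * use of alloc_pages() (from <linux/gfp.h>, pulled in via <linux/mm.h>)
 * are hypothetical glue around the pattern; the page allocator retries in
 * essentially this way.
 */
static inline struct page *example_alloc(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	page = alloc_pages(gfp_mask, order);	/* decision uses mems_allowed */
	if (!page && read_mems_allowed_retry(cpuset_mems_cookie))
		goto retry_cpuset;
	return page;
}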

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */