#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}
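
/*
 * Illustrative sketch (not part of this header's API): the cpuset core
 * pairs these calls with cpuset lifetime, bumping the key when a cpuset
 * beyond the top-level one comes online and dropping it when that cpuset
 * goes away, so cpusets_enabled() stays false and the fast paths stay
 * cheap until a second cpuset actually exists:
 *
 *	cpuset_inc();	// first non-root cpuset created
 *	...		// cpusets_enabled() now returns true
 *	cpuset_dec();	// last non-root cpuset destroyed
 */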

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
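
/*
 * Illustrative sketch (assumed caller, not defined here): allocator-style
 * code typically filters candidate zones through cpuset_zone_allowed()
 * while walking a zonelist; when only the top-level cpuset exists, the
 * nr_cpusets() check above short-circuits the slow-path call entirely:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    !cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		// ... attempt allocation from @zone ...
 *	}
 */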

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
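
/*
 * Illustrative sketch (assumed caller, not defined here): the allocator's
 * slow path can record that a task in this cpuset is under memory
 * pressure before it falls back to direct reclaim:
 *
 *	if (!page) {
 *		cpuset_memory_pressure_bump();
 *		// ... enter direct reclaim ...
 *	}
 */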

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
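
/*
 * Illustrative sketch (assumed caller, not defined here): page cache
 * allocation can spread pages evenly over the cpuset's allowed nodes
 * instead of preferring the local node:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		page = __alloc_pages_node(n, gfp, 0);
 *	} else {
 *		page = alloc_pages(gfp, 0);
 *	}
 */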

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel and, depending on the new value, an operation can
 * fail, potentially causing a spurious process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
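
/*
 * Illustrative sketch (assumed caller, not defined here): the intended
 * pattern is a seqcount-style retry loop around the allocation attempt,
 * where try_alloc() stands in for whatever operation consults
 * mems_allowed:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */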

/*
 * Writer side of the mems_allowed seqcount: update current->mems_allowed
 * under task_lock(). Interrupts are disabled across the write section so
 * that a reader running from interrupt context on this CPU cannot spin
 * forever against a write in progress.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */