This source file includes the following definitions:
- idle_policy
- fair_policy
- rt_policy
- dl_policy
- valid_policy
- task_has_idle_policy
- task_has_rt_policy
- task_has_dl_policy
- dl_entity_is_special
- dl_entity_preempt
- dl_bandwidth_enabled
- __dl_sub
- __dl_add
- __dl_overflow
- walk_tg_tree
- set_task_rq_fair
- rt_bandwidth_enabled
- rt_rq_is_runnable
- se_weight
- se_runnable
- sched_asym_prefer
- rq_of
- rq_of
- cpu_of
- update_idle_core
- update_idle_core
- __rq_clock_broken
- assert_clock_updated
- rq_clock
- rq_clock_task
- rq_clock_skip_update
- rq_clock_cancel_skipupdate
- rq_pin_lock
- rq_unpin_lock
- rq_repin_lock
- __task_rq_lock
- task_rq_unlock
- rq_lock_irqsave
- rq_lock_irq
- rq_lock
- rq_relock
- rq_unlock_irqrestore
- rq_unlock_irq
- rq_unlock
- this_rq_lock_irq
- sched_init_numa
- sched_domains_numa_masks_set
- sched_domains_numa_masks_clear
- sched_numa_find_closest
- init_numa_balancing
- queue_balance_callback
- highest_flag_domain
- lowest_flag_domain
- sched_group_span
- group_balance_mask
- group_first_cpu
- register_sched_domain_sysctl
- dirty_sched_domain_sysctl
- unregister_sched_domain_sysctl
- sched_ttwu_pending
- newidle_balance
- set_task_rq
- set_task_rq
- __set_task_cpu
- global_rt_period
- global_rt_runtime
- task_current
- task_running
- task_on_rq_queued
- task_on_rq_migrating
- put_prev_task
- set_next_task
- sched_stop_runnable
- sched_dl_runnable
- sched_rt_runnable
- sched_fair_runnable
- idle_set_state
- idle_get_state
- idle_set_state
- idle_get_state
- sched_update_tick_dependency
- sched_tick_offload_init
- sched_update_tick_dependency
- add_nr_running
- sub_nr_running
- hrtick_enabled
- hrtick_enabled
- arch_scale_freq_capacity
- _double_lock_balance
- _double_lock_balance
- double_lock_balance
- double_unlock_balance
- double_lock
- double_lock_irq
- double_raw_lock
- double_rq_lock
- double_rq_unlock
- double_rq_lock
- double_rq_unlock
- nohz_balance_exit_idle
- __dl_update
- __dl_update
- irq_time_read
- cpufreq_update_util
- cpufreq_update_util
- uclamp_util_with
- uclamp_util
- uclamp_util_with
- uclamp_util
- capacity_orig_of
- cpu_bw_dl
- cpu_util_dl
- cpu_util_cfs
- cpu_util_rt
- schedutil_cpu_util
- cpu_util_irq
- scale_irq_capacity
- cpu_util_irq
- scale_irq_capacity
- sched_energy_enabled
- sched_energy_enabled
- membarrier_switch_mm
- membarrier_switch_mm
1
2
3
4
5 #include <linux/sched.h>
6
7 #include <linux/sched/autogroup.h>
8 #include <linux/sched/clock.h>
9 #include <linux/sched/coredump.h>
10 #include <linux/sched/cpufreq.h>
11 #include <linux/sched/cputime.h>
12 #include <linux/sched/deadline.h>
13 #include <linux/sched/debug.h>
14 #include <linux/sched/hotplug.h>
15 #include <linux/sched/idle.h>
16 #include <linux/sched/init.h>
17 #include <linux/sched/isolation.h>
18 #include <linux/sched/jobctl.h>
19 #include <linux/sched/loadavg.h>
20 #include <linux/sched/mm.h>
21 #include <linux/sched/nohz.h>
22 #include <linux/sched/numa_balancing.h>
23 #include <linux/sched/prio.h>
24 #include <linux/sched/rt.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/smt.h>
27 #include <linux/sched/stat.h>
28 #include <linux/sched/sysctl.h>
29 #include <linux/sched/task.h>
30 #include <linux/sched/task_stack.h>
31 #include <linux/sched/topology.h>
32 #include <linux/sched/user.h>
33 #include <linux/sched/wake_q.h>
34 #include <linux/sched/xacct.h>
35
36 #include <uapi/linux/sched/types.h>
37
38 #include <linux/binfmts.h>
39 #include <linux/blkdev.h>
40 #include <linux/compat.h>
41 #include <linux/context_tracking.h>
42 #include <linux/cpufreq.h>
43 #include <linux/cpuidle.h>
44 #include <linux/cpuset.h>
45 #include <linux/ctype.h>
46 #include <linux/debugfs.h>
47 #include <linux/delayacct.h>
48 #include <linux/energy_model.h>
49 #include <linux/init_task.h>
50 #include <linux/kprobes.h>
51 #include <linux/kthread.h>
52 #include <linux/membarrier.h>
53 #include <linux/migrate.h>
54 #include <linux/mmu_context.h>
55 #include <linux/nmi.h>
56 #include <linux/proc_fs.h>
57 #include <linux/prefetch.h>
58 #include <linux/profile.h>
59 #include <linux/psi.h>
60 #include <linux/rcupdate_wait.h>
61 #include <linux/security.h>
62 #include <linux/stop_machine.h>
63 #include <linux/suspend.h>
64 #include <linux/swait.h>
65 #include <linux/syscalls.h>
66 #include <linux/task_work.h>
67 #include <linux/tsacct_kern.h>
68
69 #include <asm/tlb.h>
70
71 #ifdef CONFIG_PARAVIRT
72 # include <asm/paravirt.h>
73 #endif
74
75 #include "cpupri.h"
76 #include "cpudeadline.h"
77
78 #ifdef CONFIG_SCHED_DEBUG
79 # define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
80 #else
81 # define SCHED_WARN_ON(x) ({ (void)(x), 0; })
82 #endif
83
84 struct rq;
85 struct cpuidle_state;
86
87
88 #define TASK_ON_RQ_QUEUED 1
89 #define TASK_ON_RQ_MIGRATING 2
90
91 extern __read_mostly int scheduler_running;
92
93 extern unsigned long calc_load_update;
94 extern atomic_long_t calc_load_tasks;
95
96 extern void calc_global_load_tick(struct rq *this_rq);
97 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
98
99
100
101
102 #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118 #ifdef CONFIG_64BIT
119 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
120 # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
121 # define scale_load_down(w) \
122 ({ \
123 unsigned long __w = (w); \
124 if (__w) \
125 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
126 __w; \
127 })
128 #else
129 # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
130 # define scale_load(w) (w)
131 # define scale_load_down(w) (w)
132 #endif
133
134
135
136
137
138
139
140
141
142
143 #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
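/*
 * Task load weights use a fixed-point representation.  On 64-bit kernels
 * scale_load()/scale_load_down() shift by SCHED_FIXEDPOINT_SHIFT (10) to carry
 * extra resolution, so NICE_0_LOAD is 1 << 20 there and 1 << 10 on 32-bit.
 * scale_load_down() clamps a non-zero weight to at least 2 so that a small
 * weight never rounds down to zero when the resolution is dropped.
 */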
144
145
146
147
148
149
150 #define DL_SCALE 10
151
152
153
154
155 #define RUNTIME_INF ((u64)~0ULL)
156
157 static inline int idle_policy(int policy)
158 {
159 return policy == SCHED_IDLE;
160 }
161 static inline int fair_policy(int policy)
162 {
163 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
164 }
165
166 static inline int rt_policy(int policy)
167 {
168 return policy == SCHED_FIFO || policy == SCHED_RR;
169 }
170
171 static inline int dl_policy(int policy)
172 {
173 return policy == SCHED_DEADLINE;
174 }
175 static inline bool valid_policy(int policy)
176 {
177 return idle_policy(policy) || fair_policy(policy) ||
178 rt_policy(policy) || dl_policy(policy);
179 }
180
181 static inline int task_has_idle_policy(struct task_struct *p)
182 {
183 return idle_policy(p->policy);
184 }
185
186 static inline int task_has_rt_policy(struct task_struct *p)
187 {
188 return rt_policy(p->policy);
189 }
190
191 static inline int task_has_dl_policy(struct task_struct *p)
192 {
193 return dl_policy(p->policy);
194 }
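/*
 * These helpers map the UAPI scheduling policies onto the scheduler's internal
 * classes: SCHED_NORMAL/SCHED_BATCH are served by the fair class,
 * SCHED_FIFO/SCHED_RR by the rt class, SCHED_DEADLINE by the dl class, and
 * SCHED_IDLE by the fair class with a minimal weight (WEIGHT_IDLEPRIO).
 * valid_policy() is simply the union of these tests.
 */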
195
196 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
197
198
199
200
201
202
203
204
205
206
207
208
209
210 #define SCHED_FLAG_SUGOV 0x10000000
211
212 static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
213 {
214 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
215 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
216 #else
217 return false;
218 #endif
219 }
220
221
222
223
224 static inline bool
225 dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
226 {
227 return dl_entity_is_special(a) ||
228 dl_time_before(a->deadline, b->deadline);
229 }
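/*
 * SCHED_FLAG_SUGOV tags the fake deadline entities used by the schedutil
 * governor kthreads.  Such "special" entities are exempt from deadline
 * bandwidth accounting and, as dl_entity_preempt() shows, always preempt a
 * regular entity; otherwise the entity with the earlier absolute deadline
 * wins (plain EDF).
 */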
230
231
232
233
234 struct rt_prio_array {
235 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1);
236 struct list_head queue[MAX_RT_PRIO];
237 };
238
239 struct rt_bandwidth {
240
241 raw_spinlock_t rt_runtime_lock;
242 ktime_t rt_period;
243 u64 rt_runtime;
244 struct hrtimer rt_period_timer;
245 unsigned int rt_period_active;
246 };
247
248 void __dl_clear_params(struct task_struct *p);
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274 struct dl_bandwidth {
275 raw_spinlock_t dl_runtime_lock;
276 u64 dl_runtime;
277 u64 dl_period;
278 };
279
280 static inline int dl_bandwidth_enabled(void)
281 {
282 return sysctl_sched_rt_runtime >= 0;
283 }
284
285 struct dl_bw {
286 raw_spinlock_t lock;
287 u64 bw;
288 u64 total_bw;
289 };
290
291 static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
292
293 static inline
294 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
295 {
296 dl_b->total_bw -= tsk_bw;
297 __dl_update(dl_b, (s32)tsk_bw / cpus);
298 }
299
300 static inline
301 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
302 {
303 dl_b->total_bw += tsk_bw;
304 __dl_update(dl_b, -((s32)tsk_bw / cpus));
305 }
306
307 static inline
308 bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
309 {
310 return dl_b->bw != -1 &&
311 dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
312 }
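/*
 * dl_bw tracks admitted deadline bandwidth in BW_SHIFT (20-bit) fixed point,
 * i.e. to_ratio(period, runtime) = (runtime << 20) / period; a task asking
 * for 10ms of runtime every 100ms contributes roughly 0.1 << 20 = ~104857.
 * __dl_overflow() is the admission test: a change is rejected when
 * total_bw - old_bw + new_bw would exceed bw * cpus, unless bw == -1
 * (RUNTIME_INF, admission control disabled).  __dl_add()/__dl_sub() also fold
 * the per-CPU share of the change into each runqueue's extra_bw via
 * __dl_update().
 */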
313
314 extern void dl_change_utilization(struct task_struct *p, u64 new_bw);
315 extern void init_dl_bw(struct dl_bw *dl_b);
316 extern int sched_dl_global_validate(void);
317 extern void sched_dl_do_global(void);
318 extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
319 extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
320 extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
321 extern bool __checkparam_dl(const struct sched_attr *attr);
322 extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
323 extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
324 extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
325 extern bool dl_cpu_busy(unsigned int cpu);
326
327 #ifdef CONFIG_CGROUP_SCHED
328
329 #include <linux/cgroup.h>
330 #include <linux/psi.h>
331
332 struct cfs_rq;
333 struct rt_rq;
334
335 extern struct list_head task_groups;
336
337 struct cfs_bandwidth {
338 #ifdef CONFIG_CFS_BANDWIDTH
339 raw_spinlock_t lock;
340 ktime_t period;
341 u64 quota;
342 u64 runtime;
343 s64 hierarchical_quota;
344
345 u8 idle;
346 u8 period_active;
347 u8 distribute_running;
348 u8 slack_started;
349 struct hrtimer period_timer;
350 struct hrtimer slack_timer;
351 struct list_head throttled_cfs_rq;
352
353
354 int nr_periods;
355 int nr_throttled;
356 u64 throttled_time;
357 #endif
358 };
359
360
361 struct task_group {
362 struct cgroup_subsys_state css;
363
364 #ifdef CONFIG_FAIR_GROUP_SCHED
365
366 struct sched_entity **se;
367
368 struct cfs_rq **cfs_rq;
369 unsigned long shares;
370
371 #ifdef CONFIG_SMP
372
373
374
375
376
377 atomic_long_t load_avg ____cacheline_aligned;
378 #endif
379 #endif
380
381 #ifdef CONFIG_RT_GROUP_SCHED
382 struct sched_rt_entity **rt_se;
383 struct rt_rq **rt_rq;
384
385 struct rt_bandwidth rt_bandwidth;
386 #endif
387
388 struct rcu_head rcu;
389 struct list_head list;
390
391 struct task_group *parent;
392 struct list_head siblings;
393 struct list_head children;
394
395 #ifdef CONFIG_SCHED_AUTOGROUP
396 struct autogroup *autogroup;
397 #endif
398
399 struct cfs_bandwidth cfs_bandwidth;
400
401 #ifdef CONFIG_UCLAMP_TASK_GROUP
402
403 unsigned int uclamp_pct[UCLAMP_CNT];
404
405 struct uclamp_se uclamp_req[UCLAMP_CNT];
406
407 struct uclamp_se uclamp[UCLAMP_CNT];
408 #endif
409
410 };
411
412 #ifdef CONFIG_FAIR_GROUP_SCHED
413 #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
414
415
416
417
418
419
420
421
422
423 #define MIN_SHARES (1UL << 1)
424 #define MAX_SHARES (1UL << 18)
425 #endif
426
427 typedef int (*tg_visitor)(struct task_group *, void *);
428
429 extern int walk_tg_tree_from(struct task_group *from,
430 tg_visitor down, tg_visitor up, void *data);
431
432
433
434
435
436
437
438 static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
439 {
440 return walk_tg_tree_from(&root_task_group, down, up, data);
441 }
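/*
 * walk_tg_tree() iterates the whole task_group hierarchy rooted at
 * root_task_group, calling @down when first entering a group and @up when
 * leaving it for the last time; callers interested in only one direction pass
 * tg_nop for the other.  Typically called under rcu_read_lock() (see
 * walk_tg_tree_from()).
 */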
442
443 extern int tg_nop(struct task_group *tg, void *data);
444
445 extern void free_fair_sched_group(struct task_group *tg);
446 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
447 extern void online_fair_sched_group(struct task_group *tg);
448 extern void unregister_fair_sched_group(struct task_group *tg);
449 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
450 struct sched_entity *se, int cpu,
451 struct sched_entity *parent);
452 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
453
454 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
455 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
456 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
457
458 extern void free_rt_sched_group(struct task_group *tg);
459 extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
460 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
461 struct sched_rt_entity *rt_se, int cpu,
462 struct sched_rt_entity *parent);
463 extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
464 extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
465 extern long sched_group_rt_runtime(struct task_group *tg);
466 extern long sched_group_rt_period(struct task_group *tg);
467 extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
468
469 extern struct task_group *sched_create_group(struct task_group *parent);
470 extern void sched_online_group(struct task_group *tg,
471 struct task_group *parent);
472 extern void sched_destroy_group(struct task_group *tg);
473 extern void sched_offline_group(struct task_group *tg);
474
475 extern void sched_move_task(struct task_struct *tsk);
476
477 #ifdef CONFIG_FAIR_GROUP_SCHED
478 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
479
480 #ifdef CONFIG_SMP
481 extern void set_task_rq_fair(struct sched_entity *se,
482 struct cfs_rq *prev, struct cfs_rq *next);
483 #else
484 static inline void set_task_rq_fair(struct sched_entity *se,
485 struct cfs_rq *prev, struct cfs_rq *next) { }
486 #endif
487 #endif
488
489 #else
490
491 struct cfs_bandwidth { };
492
493 #endif
494
495
496 struct cfs_rq {
497 struct load_weight load;
498 unsigned long runnable_weight;
499 unsigned int nr_running;
500 unsigned int h_nr_running;
501 unsigned int idle_h_nr_running;
502
503 u64 exec_clock;
504 u64 min_vruntime;
505 #ifndef CONFIG_64BIT
506 u64 min_vruntime_copy;
507 #endif
508
509 struct rb_root_cached tasks_timeline;
510
511
512
513
514
515 struct sched_entity *curr;
516 struct sched_entity *next;
517 struct sched_entity *last;
518 struct sched_entity *skip;
519
520 #ifdef CONFIG_SCHED_DEBUG
521 unsigned int nr_spread_over;
522 #endif
523
524 #ifdef CONFIG_SMP
525
526
527
528 struct sched_avg avg;
529 #ifndef CONFIG_64BIT
530 u64 load_last_update_time_copy;
531 #endif
532 struct {
533 raw_spinlock_t lock ____cacheline_aligned;
534 int nr;
535 unsigned long load_avg;
536 unsigned long util_avg;
537 unsigned long runnable_sum;
538 } removed;
539
540 #ifdef CONFIG_FAIR_GROUP_SCHED
541 unsigned long tg_load_avg_contrib;
542 long propagate;
543 long prop_runnable_sum;
544
545
546
547
548
549
550
551 unsigned long h_load;
552 u64 last_h_load_update;
553 struct sched_entity *h_load_next;
554 #endif
555 #endif
556
557 #ifdef CONFIG_FAIR_GROUP_SCHED
558 struct rq *rq;
559
560
561
562
563
564
565
566
567
568 int on_list;
569 struct list_head leaf_cfs_rq_list;
570 struct task_group *tg;
571
572 #ifdef CONFIG_CFS_BANDWIDTH
573 int runtime_enabled;
574 s64 runtime_remaining;
575
576 u64 throttled_clock;
577 u64 throttled_clock_task;
578 u64 throttled_clock_task_time;
579 int throttled;
580 int throttle_count;
581 struct list_head throttled_list;
582 #endif
583 #endif
584 };
585
586 static inline int rt_bandwidth_enabled(void)
587 {
588 return sysctl_sched_rt_runtime >= 0;
589 }
590
591
592 #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
593 # define HAVE_RT_PUSH_IPI
594 #endif
595
596
597 struct rt_rq {
598 struct rt_prio_array active;
599 unsigned int rt_nr_running;
600 unsigned int rr_nr_running;
601 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
602 struct {
603 int curr;
604 #ifdef CONFIG_SMP
605 int next;
606 #endif
607 } highest_prio;
608 #endif
609 #ifdef CONFIG_SMP
610 unsigned long rt_nr_migratory;
611 unsigned long rt_nr_total;
612 int overloaded;
613 struct plist_head pushable_tasks;
614
615 #endif
616 int rt_queued;
617
618 int rt_throttled;
619 u64 rt_time;
620 u64 rt_runtime;
621
622 raw_spinlock_t rt_runtime_lock;
623
624 #ifdef CONFIG_RT_GROUP_SCHED
625 unsigned long rt_nr_boosted;
626
627 struct rq *rq;
628 struct task_group *tg;
629 #endif
630 };
631
632 static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
633 {
634 return rt_rq->rt_queued && rt_rq->rt_nr_running;
635 }
636
637
638 struct dl_rq {
639
640 struct rb_root_cached root;
641
642 unsigned long dl_nr_running;
643
644 #ifdef CONFIG_SMP
645
646
647
648
649
650
651 struct {
652 u64 curr;
653 u64 next;
654 } earliest_dl;
655
656 unsigned long dl_nr_migratory;
657 int overloaded;
658
659
660
661
662
663
664 struct rb_root_cached pushable_dl_tasks_root;
665 #else
666 struct dl_bw dl_bw;
667 #endif
668
669
670
671
672
673 u64 running_bw;
674
675
676
677
678
679
680
681
682
683
684 u64 this_bw;
685 u64 extra_bw;
686
687
688
689
690
691 u64 bw_ratio;
692 };
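/*
 * running_bw and this_bw track the admitted deadline bandwidth of,
 * respectively, the tasks currently active on this runqueue and all tasks
 * belonging to it (including blocked ones); they drive the GRUB reclaiming
 * algorithm and cpu_bw_dl() frequency selection.  extra_bw and bw_ratio
 * parametrize how much spare bandwidth GRUB may hand back.
 */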
693
694 #ifdef CONFIG_FAIR_GROUP_SCHED
695
696 #define entity_is_task(se) (!se->my_q)
697 #else
698 #define entity_is_task(se) 1
699 #endif
700
701 #ifdef CONFIG_SMP
702
703
704
705 static inline long se_weight(struct sched_entity *se)
706 {
707 return scale_load_down(se->load.weight);
708 }
709
710 static inline long se_runnable(struct sched_entity *se)
711 {
712 return scale_load_down(se->runnable_weight);
713 }
714
715 static inline bool sched_asym_prefer(int a, int b)
716 {
717 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
718 }
719
720 struct perf_domain {
721 struct em_perf_domain *em_pd;
722 struct perf_domain *next;
723 struct rcu_head rcu;
724 };
725
726
727 #define SG_OVERLOAD 0x1
728 #define SG_OVERUTILIZED 0x2
729
730
731
732
733
734
735
736
737
738 struct root_domain {
739 atomic_t refcount;
740 atomic_t rto_count;
741 struct rcu_head rcu;
742 cpumask_var_t span;
743 cpumask_var_t online;
744
745
746
747
748
749
750 int overload;
751
752
753 int overutilized;
754
755
756
757
758
759 cpumask_var_t dlo_mask;
760 atomic_t dlo_count;
761 struct dl_bw dl_bw;
762 struct cpudl cpudl;
763
764 #ifdef HAVE_RT_PUSH_IPI
765
766
767
768 struct irq_work rto_push_work;
769 raw_spinlock_t rto_lock;
770
771 int rto_loop;
772 int rto_cpu;
773
774 atomic_t rto_loop_next;
775 atomic_t rto_loop_start;
776 #endif
777
778
779
780
781 cpumask_var_t rto_mask;
782 struct cpupri cpupri;
783
784 unsigned long max_cpu_capacity;
785
786
787
788
789
790 struct perf_domain __rcu *pd;
791 };
792
793 extern void init_defrootdomain(void);
794 extern int sched_init_domains(const struct cpumask *cpu_map);
795 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
796 extern void sched_get_rd(struct root_domain *rd);
797 extern void sched_put_rd(struct root_domain *rd);
798
799 #ifdef HAVE_RT_PUSH_IPI
800 extern void rto_push_irq_work_func(struct irq_work *work);
801 #endif
802 #endif
803
804 #ifdef CONFIG_UCLAMP_TASK
805
806
807
808
809
810
811
812
813 struct uclamp_bucket {
814 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
815 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
816 };
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840 struct uclamp_rq {
841 unsigned int value;
842 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
843 };
844 #endif
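/*
 * Each runqueue keeps UCLAMP_BUCKETS buckets per clamp index; a bucket packs a
 * clamp value and a count of runnable tasks currently using it into a single
 * word.  uclamp_rq::value caches the max-aggregated clamp of the active
 * buckets, which is what uclamp_util() reads.
 */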
845
846
847
848
849
850
851
852
853 struct rq {
854
855 raw_spinlock_t lock;
856
857
858
859
860
861 unsigned int nr_running;
862 #ifdef CONFIG_NUMA_BALANCING
863 unsigned int nr_numa_running;
864 unsigned int nr_preferred_running;
865 unsigned int numa_migrate_on;
866 #endif
867 #ifdef CONFIG_NO_HZ_COMMON
868 #ifdef CONFIG_SMP
869 unsigned long last_load_update_tick;
870 unsigned long last_blocked_load_update_tick;
871 unsigned int has_blocked_load;
872 #endif
873 unsigned int nohz_tick_stopped;
874 atomic_t nohz_flags;
875 #endif
876
877 unsigned long nr_load_updates;
878 u64 nr_switches;
879
880 #ifdef CONFIG_UCLAMP_TASK
881
882 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
883 unsigned int uclamp_flags;
884 #define UCLAMP_FLAG_IDLE 0x01
885 #endif
886
887 struct cfs_rq cfs;
888 struct rt_rq rt;
889 struct dl_rq dl;
890
891 #ifdef CONFIG_FAIR_GROUP_SCHED
892
893 struct list_head leaf_cfs_rq_list;
894 struct list_head *tmp_alone_branch;
895 #endif
896
897
898
899
900
901
902
903 unsigned long nr_uninterruptible;
904
905 struct task_struct *curr;
906 struct task_struct *idle;
907 struct task_struct *stop;
908 unsigned long next_balance;
909 struct mm_struct *prev_mm;
910
911 unsigned int clock_update_flags;
912 u64 clock;
913
914 u64 clock_task ____cacheline_aligned;
915 u64 clock_pelt;
916 unsigned long lost_idle_time;
917
918 atomic_t nr_iowait;
919
920 #ifdef CONFIG_MEMBARRIER
921 int membarrier_state;
922 #endif
923
924 #ifdef CONFIG_SMP
925 struct root_domain *rd;
926 struct sched_domain __rcu *sd;
927
928 unsigned long cpu_capacity;
929 unsigned long cpu_capacity_orig;
930
931 struct callback_head *balance_callback;
932
933 unsigned char idle_balance;
934
935 unsigned long misfit_task_load;
936
937
938 int active_balance;
939 int push_cpu;
940 struct cpu_stop_work active_balance_work;
941
942
943 int cpu;
944 int online;
945
946 struct list_head cfs_tasks;
947
948 struct sched_avg avg_rt;
949 struct sched_avg avg_dl;
950 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
951 struct sched_avg avg_irq;
952 #endif
953 u64 idle_stamp;
954 u64 avg_idle;
955
956
957 u64 max_idle_balance_cost;
958 #endif
959
960 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
961 u64 prev_irq_time;
962 #endif
963 #ifdef CONFIG_PARAVIRT
964 u64 prev_steal_time;
965 #endif
966 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
967 u64 prev_steal_time_rq;
968 #endif
969
970
971 unsigned long calc_load_update;
972 long calc_load_active;
973
974 #ifdef CONFIG_SCHED_HRTICK
975 #ifdef CONFIG_SMP
976 int hrtick_csd_pending;
977 call_single_data_t hrtick_csd;
978 #endif
979 struct hrtimer hrtick_timer;
980 #endif
981
982 #ifdef CONFIG_SCHEDSTATS
983
984 struct sched_info rq_sched_info;
985 unsigned long long rq_cpu_time;
986
987
988
989 unsigned int yld_count;
990
991
992 unsigned int sched_count;
993 unsigned int sched_goidle;
994
995
996 unsigned int ttwu_count;
997 unsigned int ttwu_local;
998 #endif
999
1000 #ifdef CONFIG_SMP
1001 struct llist_head wake_list;
1002 #endif
1003
1004 #ifdef CONFIG_CPU_IDLE
1005
1006 struct cpuidle_state *idle_state;
1007 #endif
1008 };
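/*
 * struct rq is the per-CPU runqueue: it embeds one sub-runqueue per scheduling
 * class (cfs, rt, dl), the rq clocks, the currently running and idle tasks,
 * and - on SMP - the load-balancing state (root domain, sched_domain pointer,
 * capacity, balance callbacks).  It is protected by the raw spinlock rq->lock,
 * usually taken through the rq_lock*() helpers below.
 */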
1009
1010 #ifdef CONFIG_FAIR_GROUP_SCHED
1011
1012
1013 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1014 {
1015 return cfs_rq->rq;
1016 }
1017
1018 #else
1019
1020 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1021 {
1022 return container_of(cfs_rq, struct rq, cfs);
1023 }
1024 #endif
1025
1026 static inline int cpu_of(struct rq *rq)
1027 {
1028 #ifdef CONFIG_SMP
1029 return rq->cpu;
1030 #else
1031 return 0;
1032 #endif
1033 }
1034
1035
1036 #ifdef CONFIG_SCHED_SMT
1037 extern void __update_idle_core(struct rq *rq);
1038
1039 static inline void update_idle_core(struct rq *rq)
1040 {
1041 if (static_branch_unlikely(&sched_smt_present))
1042 __update_idle_core(rq);
1043 }
1044
1045 #else
1046 static inline void update_idle_core(struct rq *rq) { }
1047 #endif
1048
1049 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1050
1051 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1052 #define this_rq() this_cpu_ptr(&runqueues)
1053 #define task_rq(p) cpu_rq(task_cpu(p))
1054 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1055 #define raw_rq() raw_cpu_ptr(&runqueues)
1056
1057 extern void update_rq_clock(struct rq *rq);
1058
1059 static inline u64 __rq_clock_broken(struct rq *rq)
1060 {
1061 return READ_ONCE(rq->clock);
1062 }
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087 #define RQCF_REQ_SKIP 0x01
1088 #define RQCF_ACT_SKIP 0x02
1089 #define RQCF_UPDATED 0x04
1090
1091 static inline void assert_clock_updated(struct rq *rq)
1092 {
1093
1094
1095
1096
1097 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1098 }
1099
1100 static inline u64 rq_clock(struct rq *rq)
1101 {
1102 lockdep_assert_held(&rq->lock);
1103 assert_clock_updated(rq);
1104
1105 return rq->clock;
1106 }
1107
1108 static inline u64 rq_clock_task(struct rq *rq)
1109 {
1110 lockdep_assert_held(&rq->lock);
1111 assert_clock_updated(rq);
1112
1113 return rq->clock_task;
1114 }
1115
1116 static inline void rq_clock_skip_update(struct rq *rq)
1117 {
1118 lockdep_assert_held(&rq->lock);
1119 rq->clock_update_flags |= RQCF_REQ_SKIP;
1120 }
1121
1122
1123
1124
1125
1126 static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1127 {
1128 lockdep_assert_held(&rq->lock);
1129 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1130 }
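/*
 * rq->clock_update_flags tracks the state of update_rq_clock(): RQCF_REQ_SKIP
 * asks for the next update to be skipped, RQCF_ACT_SKIP means the skip is in
 * effect inside __schedule(), and RQCF_UPDATED (debug only) records that the
 * clock was in fact updated.  assert_clock_updated() fires when
 * rq_clock()/rq_clock_task() are read without a preceding update, which
 * usually means a path forgot to call update_rq_clock() after taking rq->lock.
 */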
1131
1132 struct rq_flags {
1133 unsigned long flags;
1134 struct pin_cookie cookie;
1135 #ifdef CONFIG_SCHED_DEBUG
1136
1137
1138
1139
1140
1141 unsigned int clock_update_flags;
1142 #endif
1143 };
1144
1145 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1146 {
1147 rf->cookie = lockdep_pin_lock(&rq->lock);
1148
1149 #ifdef CONFIG_SCHED_DEBUG
1150 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1151 rf->clock_update_flags = 0;
1152 #endif
1153 }
1154
1155 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1156 {
1157 #ifdef CONFIG_SCHED_DEBUG
1158 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1159 rf->clock_update_flags = RQCF_UPDATED;
1160 #endif
1161
1162 lockdep_unpin_lock(&rq->lock, rf->cookie);
1163 }
1164
1165 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1166 {
1167 lockdep_repin_lock(&rq->lock, rf->cookie);
1168
1169 #ifdef CONFIG_SCHED_DEBUG
1170
1171
1172
1173 rq->clock_update_flags |= rf->clock_update_flags;
1174 #endif
1175 }
1176
1177 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1178 __acquires(rq->lock);
1179
1180 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1181 __acquires(p->pi_lock)
1182 __acquires(rq->lock);
1183
1184 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1185 __releases(rq->lock)
1186 {
1187 rq_unpin_lock(rq, rf);
1188 raw_spin_unlock(&rq->lock);
1189 }
1190
1191 static inline void
1192 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1193 __releases(rq->lock)
1194 __releases(p->pi_lock)
1195 {
1196 rq_unpin_lock(rq, rf);
1197 raw_spin_unlock(&rq->lock);
1198 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1199 }
1200
1201 static inline void
1202 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1203 __acquires(rq->lock)
1204 {
1205 raw_spin_lock_irqsave(&rq->lock, rf->flags);
1206 rq_pin_lock(rq, rf);
1207 }
1208
1209 static inline void
1210 rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1211 __acquires(rq->lock)
1212 {
1213 raw_spin_lock_irq(&rq->lock);
1214 rq_pin_lock(rq, rf);
1215 }
1216
1217 static inline void
1218 rq_lock(struct rq *rq, struct rq_flags *rf)
1219 __acquires(rq->lock)
1220 {
1221 raw_spin_lock(&rq->lock);
1222 rq_pin_lock(rq, rf);
1223 }
1224
1225 static inline void
1226 rq_relock(struct rq *rq, struct rq_flags *rf)
1227 __acquires(rq->lock)
1228 {
1229 raw_spin_lock(&rq->lock);
1230 rq_repin_lock(rq, rf);
1231 }
1232
1233 static inline void
1234 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1235 __releases(rq->lock)
1236 {
1237 rq_unpin_lock(rq, rf);
1238 raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
1239 }
1240
1241 static inline void
1242 rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1243 __releases(rq->lock)
1244 {
1245 rq_unpin_lock(rq, rf);
1246 raw_spin_unlock_irq(&rq->lock);
1247 }
1248
1249 static inline void
1250 rq_unlock(struct rq *rq, struct rq_flags *rf)
1251 __releases(rq->lock)
1252 {
1253 rq_unpin_lock(rq, rf);
1254 raw_spin_unlock(&rq->lock);
1255 }
1256
1257 static inline struct rq *
1258 this_rq_lock_irq(struct rq_flags *rf)
1259 __acquires(rq->lock)
1260 {
1261 struct rq *rq;
1262
1263 local_irq_disable();
1264 rq = this_rq();
1265 rq_lock(rq, rf);
1266 return rq;
1267 }
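/*
 * A typical usage pattern with these helpers looks roughly like:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);	// takes p->pi_lock + rq->lock, pins rq
 *	update_rq_clock(rq);
 *	...				// p cannot migrate here
 *	task_rq_unlock(rq, p, &rf);
 *
 * The pin/unpin (lockdep_pin_lock()) part catches callers that drop rq->lock
 * where the scheduler core does not expect it; rq_repin_lock()/rq_relock()
 * restore the pin (and the saved clock_update_flags) after a legitimate drop.
 */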
1268
1269 #ifdef CONFIG_NUMA
1270 enum numa_topology_type {
1271 NUMA_DIRECT,
1272 NUMA_GLUELESS_MESH,
1273 NUMA_BACKPLANE,
1274 };
1275 extern enum numa_topology_type sched_numa_topology_type;
1276 extern int sched_max_numa_distance;
1277 extern bool find_numa_distance(int distance);
1278 extern void sched_init_numa(void);
1279 extern void sched_domains_numa_masks_set(unsigned int cpu);
1280 extern void sched_domains_numa_masks_clear(unsigned int cpu);
1281 extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1282 #else
1283 static inline void sched_init_numa(void) { }
1284 static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1285 static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1286 static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1287 {
1288 return nr_cpu_ids;
1289 }
1290 #endif
1291
1292 #ifdef CONFIG_NUMA_BALANCING
1293
1294 enum numa_faults_stats {
1295 NUMA_MEM = 0,
1296 NUMA_CPU,
1297 NUMA_MEMBUF,
1298 NUMA_CPUBUF
1299 };
1300 extern void sched_setnuma(struct task_struct *p, int node);
1301 extern int migrate_task_to(struct task_struct *p, int cpu);
1302 extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1303 int cpu, int scpu);
1304 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1305 #else
1306 static inline void
1307 init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1308 {
1309 }
1310 #endif
1311
1312 #ifdef CONFIG_SMP
1313
1314 static inline void
1315 queue_balance_callback(struct rq *rq,
1316 struct callback_head *head,
1317 void (*func)(struct rq *rq))
1318 {
1319 lockdep_assert_held(&rq->lock);
1320
1321 if (unlikely(head->next))
1322 return;
1323
1324 head->func = (void (*)(struct callback_head *))func;
1325 head->next = rq->balance_callback;
1326 rq->balance_callback = head;
1327 }
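/*
 * Balance callbacks queued here are run later by the scheduler core (see
 * balance_callback() in core.c) once it is safe to do the balancing work.
 * The head->next check makes queueing idempotent, so a callback that is
 * already pending is not queued twice.
 */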
1328
1329 extern void sched_ttwu_pending(void);
1330
1331 #define rcu_dereference_check_sched_domain(p) \
1332 rcu_dereference_check((p), \
1333 lockdep_is_held(&sched_domains_mutex))
1334
1335
1336
1337
1338
1339
1340
1341
1342 #define for_each_domain(cpu, __sd) \
1343 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1344 __sd; __sd = __sd->parent)
1345
1346 #define for_each_lower_domain(sd) for (; sd; sd = sd->child)
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357 static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1358 {
1359 struct sched_domain *sd, *hsd = NULL;
1360
1361 for_each_domain(cpu, sd) {
1362 if (!(sd->flags & flag))
1363 break;
1364 hsd = sd;
1365 }
1366
1367 return hsd;
1368 }
1369
1370 static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1371 {
1372 struct sched_domain *sd;
1373
1374 for_each_domain(cpu, sd) {
1375 if (sd->flags & flag)
1376 break;
1377 }
1378
1379 return sd;
1380 }
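/*
 * highest_flag_domain() returns the topmost sched_domain that still has @flag
 * set, relying on such flags being set bottom-up (if a level lacks the flag,
 * all higher levels do too); lowest_flag_domain() returns the lowest domain
 * carrying @flag.  These feed the per-CPU shortcuts declared below
 * (sd_llc, sd_numa, sd_asym_*).
 */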
1381
1382 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1383 DECLARE_PER_CPU(int, sd_llc_size);
1384 DECLARE_PER_CPU(int, sd_llc_id);
1385 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1386 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1387 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1388 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1389 extern struct static_key_false sched_asym_cpucapacity;
1390
1391 struct sched_group_capacity {
1392 atomic_t ref;
1393
1394
1395
1396
1397 unsigned long capacity;
1398 unsigned long min_capacity;
1399 unsigned long max_capacity;
1400 unsigned long next_update;
1401 int imbalance;
1402
1403 #ifdef CONFIG_SCHED_DEBUG
1404 int id;
1405 #endif
1406
1407 unsigned long cpumask[0];
1408 };
1409
1410 struct sched_group {
1411 struct sched_group *next;
1412 atomic_t ref;
1413
1414 unsigned int group_weight;
1415 struct sched_group_capacity *sgc;
1416 int asym_prefer_cpu;
1417
1418
1419
1420
1421
1422
1423
1424
1425 unsigned long cpumask[0];
1426 };
1427
1428 static inline struct cpumask *sched_group_span(struct sched_group *sg)
1429 {
1430 return to_cpumask(sg->cpumask);
1431 }
1432
1433
1434
1435
1436 static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1437 {
1438 return to_cpumask(sg->sgc->cpumask);
1439 }
1440
1441
1442
1443
1444
1445 static inline unsigned int group_first_cpu(struct sched_group *group)
1446 {
1447 return cpumask_first(sched_group_span(group));
1448 }
1449
1450 extern int group_balance_cpu(struct sched_group *sg);
1451
1452 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
1453 void register_sched_domain_sysctl(void);
1454 void dirty_sched_domain_sysctl(int cpu);
1455 void unregister_sched_domain_sysctl(void);
1456 #else
1457 static inline void register_sched_domain_sysctl(void)
1458 {
1459 }
1460 static inline void dirty_sched_domain_sysctl(int cpu)
1461 {
1462 }
1463 static inline void unregister_sched_domain_sysctl(void)
1464 {
1465 }
1466 #endif
1467
1468 extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
1469
1470 #else
1471
1472 static inline void sched_ttwu_pending(void) { }
1473
1474 static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; }
1475
1476 #endif
1477
1478 #include "stats.h"
1479 #include "autogroup.h"
1480
1481 #ifdef CONFIG_CGROUP_SCHED
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496 static inline struct task_group *task_group(struct task_struct *p)
1497 {
1498 return p->sched_task_group;
1499 }
1500
1501
1502 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1503 {
1504 #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1505 struct task_group *tg = task_group(p);
1506 #endif
1507
1508 #ifdef CONFIG_FAIR_GROUP_SCHED
1509 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1510 p->se.cfs_rq = tg->cfs_rq[cpu];
1511 p->se.parent = tg->se[cpu];
1512 #endif
1513
1514 #ifdef CONFIG_RT_GROUP_SCHED
1515 p->rt.rt_rq = tg->rt_rq[cpu];
1516 p->rt.parent = tg->rt_se[cpu];
1517 #endif
1518 }
1519
1520 #else
1521
1522 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1523 static inline struct task_group *task_group(struct task_struct *p)
1524 {
1525 return NULL;
1526 }
1527
1528 #endif
1529
1530 static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1531 {
1532 set_task_rq(p, cpu);
1533 #ifdef CONFIG_SMP
1534
1535
1536
1537
1538
1539 smp_wmb();
1540 #ifdef CONFIG_THREAD_INFO_IN_TASK
1541 WRITE_ONCE(p->cpu, cpu);
1542 #else
1543 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1544 #endif
1545 p->wake_cpu = cpu;
1546 #endif
1547 }
1548
1549
1550
1551
1552 #ifdef CONFIG_SCHED_DEBUG
1553 # include <linux/static_key.h>
1554 # define const_debug __read_mostly
1555 #else
1556 # define const_debug const
1557 #endif
1558
1559 #define SCHED_FEAT(name, enabled) \
1560 __SCHED_FEAT_##name ,
1561
1562 enum {
1563 #include "features.h"
1564 __SCHED_FEAT_NR,
1565 };
1566
1567 #undef SCHED_FEAT
1568
1569 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
1570
1571
1572
1573
1574
1575 extern const_debug unsigned int sysctl_sched_features;
1576
1577 #define SCHED_FEAT(name, enabled) \
1578 static __always_inline bool static_branch_##name(struct static_key *key) \
1579 { \
1580 return static_key_##enabled(key); \
1581 }
1582
1583 #include "features.h"
1584 #undef SCHED_FEAT
1585
1586 extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1587 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1588
1589 #else
1590
1591
1592
1593
1594
1595
1596 #define SCHED_FEAT(name, enabled) \
1597 (1UL << __SCHED_FEAT_##name) * enabled |
1598 static const_debug __maybe_unused unsigned int sysctl_sched_features =
1599 #include "features.h"
1600 0;
1601 #undef SCHED_FEAT
1602
1603 #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1604
1605 #endif
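/*
 * sched_feat(x) tests a scheduler feature bit from kernel/sched/features.h.
 * With SCHED_DEBUG + JUMP_LABEL each feature is a static key that can be
 * flipped at runtime through /sys/kernel/debug/sched_features; otherwise the
 * features collapse into the compile-time constant mask sysctl_sched_features
 * built just above.
 */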
1606
1607 extern struct static_key_false sched_numa_balancing;
1608 extern struct static_key_false sched_schedstats;
1609
1610 static inline u64 global_rt_period(void)
1611 {
1612 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1613 }
1614
1615 static inline u64 global_rt_runtime(void)
1616 {
1617 if (sysctl_sched_rt_runtime < 0)
1618 return RUNTIME_INF;
1619
1620 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1621 }
1622
1623 static inline int task_current(struct rq *rq, struct task_struct *p)
1624 {
1625 return rq->curr == p;
1626 }
1627
1628 static inline int task_running(struct rq *rq, struct task_struct *p)
1629 {
1630 #ifdef CONFIG_SMP
1631 return p->on_cpu;
1632 #else
1633 return task_current(rq, p);
1634 #endif
1635 }
1636
1637 static inline int task_on_rq_queued(struct task_struct *p)
1638 {
1639 return p->on_rq == TASK_ON_RQ_QUEUED;
1640 }
1641
1642 static inline int task_on_rq_migrating(struct task_struct *p)
1643 {
1644 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
1645 }
1646
1647
1648
1649
1650 #define WF_SYNC 0x01
1651 #define WF_FORK 0x02
1652 #define WF_MIGRATED 0x4
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663 #define WEIGHT_IDLEPRIO 3
1664 #define WMULT_IDLEPRIO 1431655765
1665
1666 extern const int sched_prio_to_weight[40];
1667 extern const u32 sched_prio_to_wmult[40];
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688 #define DEQUEUE_SLEEP 0x01
1689 #define DEQUEUE_SAVE 0x02
1690 #define DEQUEUE_MOVE 0x04
1691 #define DEQUEUE_NOCLOCK 0x08
1692
1693 #define ENQUEUE_WAKEUP 0x01
1694 #define ENQUEUE_RESTORE 0x02
1695 #define ENQUEUE_MOVE 0x04
1696 #define ENQUEUE_NOCLOCK 0x08
1697
1698 #define ENQUEUE_HEAD 0x10
1699 #define ENQUEUE_REPLENISH 0x20
1700 #ifdef CONFIG_SMP
1701 #define ENQUEUE_MIGRATED 0x40
1702 #else
1703 #define ENQUEUE_MIGRATED 0x00
1704 #endif
1705
1706 #define RETRY_TASK ((void *)-1UL)
1707
1708 struct sched_class {
1709 const struct sched_class *next;
1710
1711 #ifdef CONFIG_UCLAMP_TASK
1712 int uclamp_enabled;
1713 #endif
1714
1715 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1716 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1717 void (*yield_task) (struct rq *rq);
1718 bool (*yield_to_task)(struct rq *rq, struct task_struct *p, bool preempt);
1719
1720 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733 struct task_struct * (*pick_next_task)(struct rq *rq,
1734 struct task_struct *prev,
1735 struct rq_flags *rf);
1736 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1737 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
1738
1739 #ifdef CONFIG_SMP
1740 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1741 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
1742 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
1743
1744 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1745
1746 void (*set_cpus_allowed)(struct task_struct *p,
1747 const struct cpumask *newmask);
1748
1749 void (*rq_online)(struct rq *rq);
1750 void (*rq_offline)(struct rq *rq);
1751 #endif
1752
1753 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1754 void (*task_fork)(struct task_struct *p);
1755 void (*task_dead)(struct task_struct *p);
1756
1757
1758
1759
1760
1761
1762 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1763 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1764 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1765 int oldprio);
1766
1767 unsigned int (*get_rr_interval)(struct rq *rq,
1768 struct task_struct *task);
1769
1770 void (*update_curr)(struct rq *rq);
1771
1772 #define TASK_SET_GROUP 0
1773 #define TASK_MOVE_GROUP 1
1774
1775 #ifdef CONFIG_FAIR_GROUP_SCHED
1776 void (*task_change_group)(struct task_struct *p, int type);
1777 #endif
1778 };
1779
1780 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1781 {
1782 WARN_ON_ONCE(rq->curr != prev);
1783 prev->sched_class->put_prev_task(rq, prev);
1784 }
1785
1786 static inline void set_next_task(struct rq *rq, struct task_struct *next)
1787 {
1788 WARN_ON_ONCE(rq->curr != next);
1789 next->sched_class->set_next_task(rq, next, false);
1790 }
1791
1792 #ifdef CONFIG_SMP
1793 #define sched_class_highest (&stop_sched_class)
1794 #else
1795 #define sched_class_highest (&dl_sched_class)
1796 #endif
1797
1798 #define for_class_range(class, _from, _to) \
1799 for (class = (_from); class != (_to); class = class->next)
1800
1801 #define for_each_class(class) \
1802 for_class_range(class, sched_class_highest, NULL)
1803
1804 extern const struct sched_class stop_sched_class;
1805 extern const struct sched_class dl_sched_class;
1806 extern const struct sched_class rt_sched_class;
1807 extern const struct sched_class fair_sched_class;
1808 extern const struct sched_class idle_sched_class;
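/*
 * The classes form a singly linked, strictly ordered list via ->next:
 * stop -> dl -> rt -> fair -> idle (stop only exists on SMP).
 * for_each_class() walks them in that priority order, which is what the
 * pick_next_task() path relies on when it asks each class in turn for a task.
 */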
1809
1810 static inline bool sched_stop_runnable(struct rq *rq)
1811 {
1812 return rq->stop && task_on_rq_queued(rq->stop);
1813 }
1814
1815 static inline bool sched_dl_runnable(struct rq *rq)
1816 {
1817 return rq->dl.dl_nr_running > 0;
1818 }
1819
1820 static inline bool sched_rt_runnable(struct rq *rq)
1821 {
1822 return rq->rt.rt_queued > 0;
1823 }
1824
1825 static inline bool sched_fair_runnable(struct rq *rq)
1826 {
1827 return rq->cfs.nr_running > 0;
1828 }
1829
1830 #ifdef CONFIG_SMP
1831
1832 extern void update_group_capacity(struct sched_domain *sd, int cpu);
1833
1834 extern void trigger_load_balance(struct rq *rq);
1835
1836 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1837
1838 #endif
1839
1840 #ifdef CONFIG_CPU_IDLE
1841 static inline void idle_set_state(struct rq *rq,
1842 struct cpuidle_state *idle_state)
1843 {
1844 rq->idle_state = idle_state;
1845 }
1846
1847 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1848 {
1849 SCHED_WARN_ON(!rcu_read_lock_held());
1850
1851 return rq->idle_state;
1852 }
1853 #else
1854 static inline void idle_set_state(struct rq *rq,
1855 struct cpuidle_state *idle_state)
1856 {
1857 }
1858
1859 static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1860 {
1861 return NULL;
1862 }
1863 #endif
1864
1865 extern void schedule_idle(void);
1866
1867 extern void sysrq_sched_debug_show(void);
1868 extern void sched_init_granularity(void);
1869 extern void update_max_interval(void);
1870
1871 extern void init_sched_dl_class(void);
1872 extern void init_sched_rt_class(void);
1873 extern void init_sched_fair_class(void);
1874
1875 extern void reweight_task(struct task_struct *p, int prio);
1876
1877 extern void resched_curr(struct rq *rq);
1878 extern void resched_cpu(int cpu);
1879
1880 extern struct rt_bandwidth def_rt_bandwidth;
1881 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1882
1883 extern struct dl_bandwidth def_dl_bandwidth;
1884 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
1885 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1886 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
1887 extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
1888
1889 #define BW_SHIFT 20
1890 #define BW_UNIT (1 << BW_SHIFT)
1891 #define RATIO_SHIFT 8
1892 unsigned long to_ratio(u64 period, u64 runtime);
1893
1894 extern void init_entity_runnable_average(struct sched_entity *se);
1895 extern void post_init_entity_util_avg(struct task_struct *p);
1896
1897 #ifdef CONFIG_NO_HZ_FULL
1898 extern bool sched_can_stop_tick(struct rq *rq);
1899 extern int __init sched_tick_offload_init(void);
1900
1901
1902
1903
1904
1905
1906 static inline void sched_update_tick_dependency(struct rq *rq)
1907 {
1908 int cpu;
1909
1910 if (!tick_nohz_full_enabled())
1911 return;
1912
1913 cpu = cpu_of(rq);
1914
1915 if (!tick_nohz_full_cpu(cpu))
1916 return;
1917
1918 if (sched_can_stop_tick(rq))
1919 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
1920 else
1921 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
1922 }
1923 #else
1924 static inline int sched_tick_offload_init(void) { return 0; }
1925 static inline void sched_update_tick_dependency(struct rq *rq) { }
1926 #endif
1927
1928 static inline void add_nr_running(struct rq *rq, unsigned count)
1929 {
1930 unsigned prev_nr = rq->nr_running;
1931
1932 rq->nr_running = prev_nr + count;
1933
1934 #ifdef CONFIG_SMP
1935 if (prev_nr < 2 && rq->nr_running >= 2) {
1936 if (!READ_ONCE(rq->rd->overload))
1937 WRITE_ONCE(rq->rd->overload, 1);
1938 }
1939 #endif
1940
1941 sched_update_tick_dependency(rq);
1942 }
1943
1944 static inline void sub_nr_running(struct rq *rq, unsigned count)
1945 {
1946 rq->nr_running -= count;
1947
1948 sched_update_tick_dependency(rq);
1949 }
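/*
 * Besides maintaining rq->nr_running, add_nr_running() marks the root domain
 * as overloaded once a second runnable task shows up (consumed by the
 * newidle/periodic load balancer), and both helpers re-evaluate the
 * NO_HZ_FULL tick dependency so a CPU with more than one runnable task keeps
 * its tick.
 */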
1950
1951 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1952 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1953
1954 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1955
1956 extern const_debug unsigned int sysctl_sched_nr_migrate;
1957 extern const_debug unsigned int sysctl_sched_migration_cost;
1958
1959 #ifdef CONFIG_SCHED_HRTICK
1960
1961
1962
1963
1964
1965
1966 static inline int hrtick_enabled(struct rq *rq)
1967 {
1968 if (!sched_feat(HRTICK))
1969 return 0;
1970 if (!cpu_active(cpu_of(rq)))
1971 return 0;
1972 return hrtimer_is_hres_active(&rq->hrtick_timer);
1973 }
1974
1975 void hrtick_start(struct rq *rq, u64 delay);
1976
1977 #else
1978
1979 static inline int hrtick_enabled(struct rq *rq)
1980 {
1981 return 0;
1982 }
1983
1984 #endif
1985
1986 #ifndef arch_scale_freq_capacity
1987 static __always_inline
1988 unsigned long arch_scale_freq_capacity(int cpu)
1989 {
1990 return SCHED_CAPACITY_SCALE;
1991 }
1992 #endif
1993
1994 #ifdef CONFIG_SMP
1995 #ifdef CONFIG_PREEMPTION
1996
1997 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2008 __releases(this_rq->lock)
2009 __acquires(busiest->lock)
2010 __acquires(this_rq->lock)
2011 {
2012 raw_spin_unlock(&this_rq->lock);
2013 double_rq_lock(this_rq, busiest);
2014
2015 return 1;
2016 }
2017
2018 #else
2019
2020
2021
2022
2023
2024
2025
2026 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2027 __releases(this_rq->lock)
2028 __acquires(busiest->lock)
2029 __acquires(this_rq->lock)
2030 {
2031 int ret = 0;
2032
2033 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
2034 if (busiest < this_rq) {
2035 raw_spin_unlock(&this_rq->lock);
2036 raw_spin_lock(&busiest->lock);
2037 raw_spin_lock_nested(&this_rq->lock,
2038 SINGLE_DEPTH_NESTING);
2039 ret = 1;
2040 } else
2041 raw_spin_lock_nested(&busiest->lock,
2042 SINGLE_DEPTH_NESTING);
2043 }
2044 return ret;
2045 }
2046
2047 #endif
2048
2049
2050
2051
2052 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2053 {
2054 if (unlikely(!irqs_disabled())) {
2055
2056 raw_spin_unlock(&this_rq->lock);
2057 BUG_ON(1);
2058 }
2059
2060 return _double_lock_balance(this_rq, busiest);
2061 }
2062
2063 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2064 __releases(busiest->lock)
2065 {
2066 raw_spin_unlock(&busiest->lock);
2067 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2068 }
2069
2070 static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2071 {
2072 if (l1 > l2)
2073 swap(l1, l2);
2074
2075 spin_lock(l1);
2076 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2077 }
2078
2079 static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2080 {
2081 if (l1 > l2)
2082 swap(l1, l2);
2083
2084 spin_lock_irq(l1);
2085 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2086 }
2087
2088 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2089 {
2090 if (l1 > l2)
2091 swap(l1, l2);
2092
2093 raw_spin_lock(l1);
2094 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2095 }
2096
2097
2098
2099
2100
2101
2102
2103 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2104 __acquires(rq1->lock)
2105 __acquires(rq2->lock)
2106 {
2107 BUG_ON(!irqs_disabled());
2108 if (rq1 == rq2) {
2109 raw_spin_lock(&rq1->lock);
2110 __acquire(rq2->lock);
2111 } else {
2112 if (rq1 < rq2) {
2113 raw_spin_lock(&rq1->lock);
2114 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2115 } else {
2116 raw_spin_lock(&rq2->lock);
2117 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2118 }
2119 }
2120 }
2121
2122
2123
2124
2125
2126
2127
2128 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2129 __releases(rq1->lock)
2130 __releases(rq2->lock)
2131 {
2132 raw_spin_unlock(&rq1->lock);
2133 if (rq1 != rq2)
2134 raw_spin_unlock(&rq2->lock);
2135 else
2136 __release(rq2->lock);
2137 }
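/*
 * Whenever two runqueue locks are needed they are taken in a global order
 * (ascending lock/rq address, see double_rq_lock(), double_lock() and
 * friends) so that two CPUs acquiring the same pair cannot deadlock.
 * _double_lock_balance() is the exception: the PREEMPTION variant simply
 * drops this_rq->lock and retakes both in order, while the non-preempt
 * variant trylocks busiest and only falls back to the ordered sequence on
 * contention; a non-zero return tells the caller that this_rq->lock was
 * dropped in the process.
 */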
2138
2139 extern void set_rq_online (struct rq *rq);
2140 extern void set_rq_offline(struct rq *rq);
2141 extern bool sched_smp_initialized;
2142
2143 #else
2144
2145
2146
2147
2148
2149
2150
2151 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2152 __acquires(rq1->lock)
2153 __acquires(rq2->lock)
2154 {
2155 BUG_ON(!irqs_disabled());
2156 BUG_ON(rq1 != rq2);
2157 raw_spin_lock(&rq1->lock);
2158 __acquire(rq2->lock);
2159 }
2160
2161
2162
2163
2164
2165
2166
2167 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2168 __releases(rq1->lock)
2169 __releases(rq2->lock)
2170 {
2171 BUG_ON(rq1 != rq2);
2172 raw_spin_unlock(&rq1->lock);
2173 __release(rq2->lock);
2174 }
2175
2176 #endif
2177
2178 extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2179 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2180
2181 #ifdef CONFIG_SCHED_DEBUG
2182 extern bool sched_debug_enabled;
2183
2184 extern void print_cfs_stats(struct seq_file *m, int cpu);
2185 extern void print_rt_stats(struct seq_file *m, int cpu);
2186 extern void print_dl_stats(struct seq_file *m, int cpu);
2187 extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2188 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2189 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2190 #ifdef CONFIG_NUMA_BALANCING
2191 extern void
2192 show_numa_stats(struct task_struct *p, struct seq_file *m);
2193 extern void
2194 print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2195 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2196 #endif
2197 #endif
2198
2199 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2200 extern void init_rt_rq(struct rt_rq *rt_rq);
2201 extern void init_dl_rq(struct dl_rq *dl_rq);
2202
2203 extern void cfs_bandwidth_usage_inc(void);
2204 extern void cfs_bandwidth_usage_dec(void);
2205
2206 #ifdef CONFIG_NO_HZ_COMMON
2207 #define NOHZ_BALANCE_KICK_BIT 0
2208 #define NOHZ_STATS_KICK_BIT 1
2209
2210 #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2211 #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2212
2213 #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2214
2215 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2216
2217 extern void nohz_balance_exit_idle(struct rq *rq);
2218 #else
2219 static inline void nohz_balance_exit_idle(struct rq *rq) { }
2220 #endif
2221
2222
2223 #ifdef CONFIG_SMP
2224 static inline
2225 void __dl_update(struct dl_bw *dl_b, s64 bw)
2226 {
2227 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2228 int i;
2229
2230 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2231 "sched RCU must be held");
2232 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2233 struct rq *rq = cpu_rq(i);
2234
2235 rq->dl.extra_bw += bw;
2236 }
2237 }
2238 #else
2239 static inline
2240 void __dl_update(struct dl_bw *dl_b, s64 bw)
2241 {
2242 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2243
2244 dl->extra_bw += bw;
2245 }
2246 #endif
2247
2248
2249 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
2250 struct irqtime {
2251 u64 total;
2252 u64 tick_delta;
2253 u64 irq_start_time;
2254 struct u64_stats_sync sync;
2255 };
2256
2257 DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2258
2259
2260
2261
2262
2263
2264 static inline u64 irq_time_read(int cpu)
2265 {
2266 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2267 unsigned int seq;
2268 u64 total;
2269
2270 do {
2271 seq = __u64_stats_fetch_begin(&irqtime->sync);
2272 total = irqtime->total;
2273 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2274
2275 return total;
2276 }
2277 #endif
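/*
 * irq_time_read() uses the u64_stats seqcount so that 32-bit kernels get a
 * coherent snapshot of the 64-bit IRQ time; on 64-bit the fetch/retry pair
 * compiles away to a plain read.
 */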
2278
2279 #ifdef CONFIG_CPU_FREQ
2280 DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2305 {
2306 struct update_util_data *data;
2307
2308 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2309 cpu_of(rq)));
2310 if (data)
2311 data->func(data, rq_clock(rq), flags);
2312 }
2313 #else
2314 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2315 #endif
2316
2317 #ifdef CONFIG_UCLAMP_TASK
2318 unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
2319
2320 static __always_inline
2321 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2322 struct task_struct *p)
2323 {
2324 unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
2325 unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
2326
2327 if (p) {
2328 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
2329 max_util = max(max_util, uclamp_eff_value(p, UCLAMP_MAX));
2330 }
2331
2332
2333
2334
2335
2336
2337 if (unlikely(min_util >= max_util))
2338 return min_util;
2339
2340 return clamp(util, min_util, max_util);
2341 }
2342
2343 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2344 {
2345 return uclamp_util_with(rq, util, NULL);
2346 }
2347 #else
2348 static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
2349 struct task_struct *p)
2350 {
2351 return util;
2352 }
2353 static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
2354 {
2355 return util;
2356 }
2357 #endif
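/*
 * uclamp_util_with() clamps a utilization value into the [UCLAMP_MIN,
 * UCLAMP_MAX] range currently aggregated on the runqueue, optionally raised by
 * @p's own effective clamps; e.g. util = 300 with an rq-wide min clamp of 512
 * yields 512.  Because the per-rq values are max-aggregated over the runnable
 * tasks, min can temporarily exceed max; in that case the min clamp wins.
 */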
2358
2359 #ifdef arch_scale_freq_capacity
2360 # ifndef arch_scale_freq_invariant
2361 # define arch_scale_freq_invariant() true
2362 # endif
2363 #else
2364 # define arch_scale_freq_invariant() false
2365 #endif
2366
2367 #ifdef CONFIG_SMP
2368 static inline unsigned long capacity_orig_of(int cpu)
2369 {
2370 return cpu_rq(cpu)->cpu_capacity_orig;
2371 }
2372 #endif
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384 enum schedutil_type {
2385 FREQUENCY_UTIL,
2386 ENERGY_UTIL,
2387 };
2388
2389 #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
2390
2391 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2392 unsigned long max, enum schedutil_type type,
2393 struct task_struct *p);
2394
2395 static inline unsigned long cpu_bw_dl(struct rq *rq)
2396 {
2397 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2398 }
2399
2400 static inline unsigned long cpu_util_dl(struct rq *rq)
2401 {
2402 return READ_ONCE(rq->avg_dl.util_avg);
2403 }
2404
2405 static inline unsigned long cpu_util_cfs(struct rq *rq)
2406 {
2407 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2408
2409 if (sched_feat(UTIL_EST)) {
2410 util = max_t(unsigned long, util,
2411 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2412 }
2413
2414 return util;
2415 }
2416
2417 static inline unsigned long cpu_util_rt(struct rq *rq)
2418 {
2419 return READ_ONCE(rq->avg_rt.util_avg);
2420 }
2421 #else
2422 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
2423 unsigned long max, enum schedutil_type type,
2424 struct task_struct *p)
2425 {
2426 return 0;
2427 }
2428 #endif
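/*
 * These are the per-class utilization inputs for schedutil and the energy
 * model: cpu_bw_dl() converts the admitted deadline bandwidth (BW_SHIFT fixed
 * point) into capacity units, cpu_util_dl()/cpu_util_rt() are the PELT running
 * averages of those classes, and cpu_util_cfs() takes the max of the CFS
 * util_avg and the util_est estimate (when UTIL_EST is enabled) so a freshly
 * woken task is not under-served.
 */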
2429
2430 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2431 static inline unsigned long cpu_util_irq(struct rq *rq)
2432 {
2433 return rq->avg_irq.util_avg;
2434 }
2435
2436 static inline
2437 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2438 {
2439 util *= (max - irq);
2440 util /= max;
2441
2442 return util;
2443
2444 }
2445 #else
2446 static inline unsigned long cpu_util_irq(struct rq *rq)
2447 {
2448 return 0;
2449 }
2450
2451 static inline
2452 unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2453 {
2454 return util;
2455 }
2456 #endif
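/*
 * scale_irq_capacity() discounts rq-level utilization by the fraction of
 * capacity eaten by IRQ/steal time:
 *
 *	util' = util * (max - irq) / max
 *
 * When HAVE_SCHED_AVG_IRQ is not set, IRQ pressure is assumed to be zero and
 * util is returned unchanged.
 */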
2457
2458 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2459
2460 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2461
2462 DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2463
2464 static inline bool sched_energy_enabled(void)
2465 {
2466 return static_branch_unlikely(&sched_energy_present);
2467 }
2468
2469 #else
2470
2471 #define perf_domain_span(pd) NULL
2472 static inline bool sched_energy_enabled(void) { return false; }
2473
2474 #endif
2475
2476 #ifdef CONFIG_MEMBARRIER
2477
2478
2479
2480
2481
2482
2483 static inline void membarrier_switch_mm(struct rq *rq,
2484 struct mm_struct *prev_mm,
2485 struct mm_struct *next_mm)
2486 {
2487 int membarrier_state;
2488
2489 if (prev_mm == next_mm)
2490 return;
2491
2492 membarrier_state = atomic_read(&next_mm->membarrier_state);
2493 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
2494 return;
2495
2496 WRITE_ONCE(rq->membarrier_state, membarrier_state);
2497 }
2498 #else
2499 static inline void membarrier_switch_mm(struct rq *rq,
2500 struct mm_struct *prev_mm,
2501 struct mm_struct *next_mm)
2502 {
2503 }
2504 #endif