kernel/sched/deadline.c


DEFINITIONS

This source file includes the following definitions.
  1. dl_task_of
  2. rq_of_dl_rq
  3. dl_rq_of_se
  4. on_dl_rq
  5. dl_bw_of
  6. dl_bw_cpus
  7. dl_bw_of
  8. dl_bw_cpus
  9. __add_running_bw
  10. __sub_running_bw
  11. __add_rq_bw
  12. __sub_rq_bw
  13. add_rq_bw
  14. sub_rq_bw
  15. add_running_bw
  16. sub_running_bw
  17. dl_change_utilization
  18. task_non_contending
  19. task_contending
  20. is_leftmost
  21. init_dl_bandwidth
  22. init_dl_bw
  23. init_dl_rq
  24. dl_overloaded
  25. dl_set_overload
  26. dl_clear_overload
  27. update_dl_migration
  28. inc_dl_migration
  29. dec_dl_migration
  30. enqueue_pushable_dl_task
  31. dequeue_pushable_dl_task
  32. has_pushable_dl_tasks
  33. need_pull_dl_task
  34. deadline_queue_push_tasks
  35. deadline_queue_pull_task
  36. dl_task_offline_migration
  37. enqueue_pushable_dl_task
  38. dequeue_pushable_dl_task
  39. inc_dl_migration
  40. dec_dl_migration
  41. need_pull_dl_task
  42. pull_dl_task
  43. deadline_queue_push_tasks
  44. deadline_queue_pull_task
  45. setup_new_dl_entity
  46. replenish_dl_entity
  47. dl_entity_overflow
  48. update_dl_revised_wakeup
  49. dl_is_implicit
  50. update_dl_entity
  51. dl_next_period
  52. start_dl_timer
  53. dl_task_timer
  54. init_dl_task_timer
  55. dl_check_constrained_dl
  56. dl_runtime_exceeded
  57. grub_reclaim
  58. update_curr_dl
  59. inactive_task_timer
  60. init_dl_inactive_task_timer
  61. inc_dl_deadline
  62. dec_dl_deadline
  63. inc_dl_deadline
  64. dec_dl_deadline
  65. inc_dl_tasks
  66. dec_dl_tasks
  67. __enqueue_dl_entity
  68. __dequeue_dl_entity
  69. enqueue_dl_entity
  70. dequeue_dl_entity
  71. enqueue_task_dl
  72. __dequeue_task_dl
  73. dequeue_task_dl
  74. yield_task_dl
  75. select_task_rq_dl
  76. migrate_task_rq_dl
  77. check_preempt_equal_dl
  78. balance_dl
  79. check_preempt_curr_dl
  80. start_hrtick_dl
  81. start_hrtick_dl
  82. set_next_task_dl
  83. pick_next_dl_entity
  84. pick_next_task_dl
  85. put_prev_task_dl
  86. task_tick_dl
  87. task_fork_dl
  88. pick_dl_task
  89. pick_earliest_pushable_dl_task
  90. find_later_rq
  91. find_lock_later_rq
  92. pick_next_pushable_dl_task
  93. push_dl_task
  94. push_dl_tasks
  95. pull_dl_task
  96. task_woken_dl
  97. set_cpus_allowed_dl
  98. rq_online_dl
  99. rq_offline_dl
  100. init_sched_dl_class
  101. dl_add_task_root_domain
  102. dl_clear_root_domain
  103. switched_from_dl
  104. switched_to_dl
  105. prio_changed_dl
  106. sched_dl_global_validate
  107. init_dl_rq_bw_ratio
  108. sched_dl_do_global
  109. sched_dl_overflow
  110. __setparam_dl
  111. __getparam_dl
  112. __checkparam_dl
  113. __dl_clear_params
  114. dl_param_changed
  115. dl_task_can_attach
  116. dl_cpuset_cpumask_can_shrink
  117. dl_cpu_busy
  118. print_dl_stats

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Deadline Scheduling Class (SCHED_DEADLINE)
   4  *
   5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
   6  *
   7  * Tasks that periodically execute their instances for less than their
   8  * runtime won't miss any of their deadlines.
   9  * Tasks that are not periodic or sporadic, or that try to execute more
  10  * than their reserved bandwidth, will be slowed down (and may potentially
  11  * miss some of their deadlines), and won't affect any other task.
  12  *
  13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
  14  *                    Juri Lelli <juri.lelli@gmail.com>,
  15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
  16  *                    Fabio Checconi <fchecconi@gmail.com>
  17  */
  18 #include "sched.h"
  19 #include "pelt.h"
  20 
  21 struct dl_bandwidth def_dl_bandwidth;
  22 
  23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24 {
  25         return container_of(dl_se, struct task_struct, dl);
  26 }
  27 
  28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29 {
  30         return container_of(dl_rq, struct rq, dl);
  31 }
  32 
  33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34 {
  35         struct task_struct *p = dl_task_of(dl_se);
  36         struct rq *rq = task_rq(p);
  37 
  38         return &rq->dl;
  39 }
  40 
  41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42 {
  43         return !RB_EMPTY_NODE(&dl_se->rb_node);
  44 }
  45 
  46 #ifdef CONFIG_SMP
  47 static inline struct dl_bw *dl_bw_of(int i)
  48 {
  49         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  50                          "sched RCU must be held");
  51         return &cpu_rq(i)->rd->dl_bw;
  52 }
  53 
  54 static inline int dl_bw_cpus(int i)
  55 {
  56         struct root_domain *rd = cpu_rq(i)->rd;
  57         int cpus = 0;
  58 
  59         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  60                          "sched RCU must be held");
  61         for_each_cpu_and(i, rd->span, cpu_active_mask)
  62                 cpus++;
  63 
  64         return cpus;
  65 }
  66 #else
  67 static inline struct dl_bw *dl_bw_of(int i)
  68 {
  69         return &cpu_rq(i)->dl.dl_bw;
  70 }
  71 
  72 static inline int dl_bw_cpus(int i)
  73 {
  74         return 1;
  75 }
  76 #endif
  77 
  78 static inline
  79 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  80 {
  81         u64 old = dl_rq->running_bw;
  82 
  83         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  84         dl_rq->running_bw += dl_bw;
  85         SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
  86         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
  87         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
  88         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
  89 }
  90 
  91 static inline
  92 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  93 {
  94         u64 old = dl_rq->running_bw;
  95 
  96         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  97         dl_rq->running_bw -= dl_bw;
  98         SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
  99         if (dl_rq->running_bw > old)
 100                 dl_rq->running_bw = 0;
 101         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
 102         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 103 }
 104 
 105 static inline
 106 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 107 {
 108         u64 old = dl_rq->this_bw;
 109 
 110         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 111         dl_rq->this_bw += dl_bw;
 112         SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
 113 }
 114 
 115 static inline
 116 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 117 {
 118         u64 old = dl_rq->this_bw;
 119 
 120         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 121         dl_rq->this_bw -= dl_bw;
 122         SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
 123         if (dl_rq->this_bw > old)
 124                 dl_rq->this_bw = 0;
 125         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 126 }
 127 
 128 static inline
 129 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 130 {
 131         if (!dl_entity_is_special(dl_se))
 132                 __add_rq_bw(dl_se->dl_bw, dl_rq);
 133 }
 134 
 135 static inline
 136 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 137 {
 138         if (!dl_entity_is_special(dl_se))
 139                 __sub_rq_bw(dl_se->dl_bw, dl_rq);
 140 }
 141 
 142 static inline
 143 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144 {
 145         if (!dl_entity_is_special(dl_se))
 146                 __add_running_bw(dl_se->dl_bw, dl_rq);
 147 }
 148 
 149 static inline
 150 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 151 {
 152         if (!dl_entity_is_special(dl_se))
 153                 __sub_running_bw(dl_se->dl_bw, dl_rq);
 154 }
 155 
 156 void dl_change_utilization(struct task_struct *p, u64 new_bw)
 157 {
 158         struct rq *rq;
 159 
 160         BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 161 
 162         if (task_on_rq_queued(p))
 163                 return;
 164 
 165         rq = task_rq(p);
 166         if (p->dl.dl_non_contending) {
 167                 sub_running_bw(&p->dl, &rq->dl);
 168                 p->dl.dl_non_contending = 0;
 169                 /*
 170                  * If the timer handler is currently running and the
 171                  * timer cannot be cancelled, inactive_task_timer()
 172                  * will see that dl_non_contending is not set, and
 173                  * will not touch the rq's active utilization,
 174                  * so we are still safe.
 175                  */
 176                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 177                         put_task_struct(p);
 178         }
 179         __sub_rq_bw(p->dl.dl_bw, &rq->dl);
 180         __add_rq_bw(new_bw, &rq->dl);
 181 }
 182 
 183 /*
 184  * The utilization of a task cannot be immediately removed from
 185  * the rq active utilization (running_bw) when the task blocks.
 186  * Instead, we have to wait for the so called "0-lag time".
 187  *
 188  * If a task blocks before the "0-lag time", a timer (the inactive
 189  * timer) is armed, and running_bw is decreased when the timer
 190  * fires.
 191  *
 192  * If the task wakes up again before the inactive timer fires,
 193  * the timer is cancelled, whereas if the task wakes up after the
 194  * inactive timer fired (and running_bw has been decreased) the
 195  * task's utilization has to be added to running_bw again.
 196  * A flag in the deadline scheduling entity (dl_non_contending)
 197  * is used to avoid race conditions between the inactive timer handler
 198  * and task wakeups.
 199  *
 200  * The following diagram shows how running_bw is updated. A task is
 201  * "ACTIVE" when its utilization contributes to running_bw; an
 202  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 203  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 204  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 205  * time already passed, which does not contribute to running_bw anymore.
 206  *                              +------------------+
 207  *             wakeup           |    ACTIVE        |
 208  *          +------------------>+   contending     |
 209  *          | add_running_bw    |                  |
 210  *          |                   +----+------+------+
 211  *          |                        |      ^
 212  *          |                dequeue |      |
 213  * +--------+-------+                |      |
 214  * |                |   t >= 0-lag   |      | wakeup
 215  * |    INACTIVE    |<---------------+      |
 216  * |                | sub_running_bw |      |
 217  * +--------+-------+                |      |
 218  *          ^                        |      |
 219  *          |              t < 0-lag |      |
 220  *          |                        |      |
 221  *          |                        V      |
 222  *          |                   +----+------+------+
 223  *          | sub_running_bw    |    ACTIVE        |
 224  *          +-------------------+                  |
 225  *            inactive timer    |  non contending  |
 226  *            fired             +------------------+
 227  *
 228  * The task_non_contending() function is invoked when a task
 229  * blocks, and checks if the 0-lag time already passed or
 230  * not (in the first case, it directly updates running_bw;
 231  * in the second case, it arms the inactive timer).
 232  *
 233  * The task_contending() function is invoked when a task wakes
 234  * up, and checks if the task is still in the "ACTIVE non contending"
 235  * state or not (in the second case, it updates running_bw).
 236  */
 237 static void task_non_contending(struct task_struct *p)
 238 {
 239         struct sched_dl_entity *dl_se = &p->dl;
 240         struct hrtimer *timer = &dl_se->inactive_timer;
 241         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 242         struct rq *rq = rq_of_dl_rq(dl_rq);
 243         s64 zerolag_time;
 244 
 245         /*
 246          * If this is a non-deadline task that has been boosted,
 247          * do nothing
 248          */
 249         if (dl_se->dl_runtime == 0)
 250                 return;
 251 
 252         if (dl_entity_is_special(dl_se))
 253                 return;
 254 
 255         WARN_ON(dl_se->dl_non_contending);
 256 
 257         zerolag_time = dl_se->deadline -
 258                  div64_long((dl_se->runtime * dl_se->dl_period),
 259                         dl_se->dl_runtime);
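
        /*
         * Illustrative example (hypothetical numbers): with 2ms of runtime
         * left, dl_runtime = 10ms and dl_period = 100ms, the 0-lag instant
         * is deadline - (2 * 100) / 10 = deadline - 20ms. If the task
         * blocks before that instant, the inactive timer is armed for it;
         * otherwise running_bw is decreased immediately below.
         */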
 260 
 261         /*
 262          * Using relative times instead of the absolute "0-lag time"
 263          * allows us to simplify the code
 264          */
 265         zerolag_time -= rq_clock(rq);
 266 
 267         /*
 268          * If the "0-lag time" already passed, decrease the active
 269          * utilization now, instead of starting a timer
 270          */
 271         if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 272                 if (dl_task(p))
 273                         sub_running_bw(dl_se, dl_rq);
 274                 if (!dl_task(p) || p->state == TASK_DEAD) {
 275                         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 276 
 277                         if (p->state == TASK_DEAD)
 278                                 sub_rq_bw(&p->dl, &rq->dl);
 279                         raw_spin_lock(&dl_b->lock);
 280                         __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 281                         __dl_clear_params(p);
 282                         raw_spin_unlock(&dl_b->lock);
 283                 }
 284 
 285                 return;
 286         }
 287 
 288         dl_se->dl_non_contending = 1;
 289         get_task_struct(p);
 290         hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 291 }
 292 
 293 static void task_contending(struct sched_dl_entity *dl_se, int flags)
 294 {
 295         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 296 
 297         /*
 298          * If this is a non-deadline task that has been boosted,
 299          * do nothing
 300          */
 301         if (dl_se->dl_runtime == 0)
 302                 return;
 303 
 304         if (flags & ENQUEUE_MIGRATED)
 305                 add_rq_bw(dl_se, dl_rq);
 306 
 307         if (dl_se->dl_non_contending) {
 308                 dl_se->dl_non_contending = 0;
 309                 /*
 310                  * If the timer handler is currently running and the
 311                  * timer cannot be cancelled, inactive_task_timer()
 312                  * will see that dl_non_contending is not set, and
 313                  * will not touch the rq's active utilization,
 314                  * so we are still safe.
 315                  */
 316                 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 317                         put_task_struct(dl_task_of(dl_se));
 318         } else {
 319                 /*
 320                  * Since "dl_non_contending" is not set, the
 321                  * task's utilization has already been removed from
 322                  * active utilization (either when the task blocked,
 323                  * or when the "inactive timer" fired).
 324                  * So, add it back.
 325                  */
 326                 add_running_bw(dl_se, dl_rq);
 327         }
 328 }
 329 
 330 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 331 {
 332         struct sched_dl_entity *dl_se = &p->dl;
 333 
 334         return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 335 }
 336 
 337 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 338 {
 339         raw_spin_lock_init(&dl_b->dl_runtime_lock);
 340         dl_b->dl_period = period;
 341         dl_b->dl_runtime = runtime;
 342 }
 343 
 344 void init_dl_bw(struct dl_bw *dl_b)
 345 {
 346         raw_spin_lock_init(&dl_b->lock);
 347         raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 348         if (global_rt_runtime() == RUNTIME_INF)
 349                 dl_b->bw = -1;
 350         else
 351                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 352         raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 353         dl_b->total_bw = 0;
 354 }
 355 
 356 void init_dl_rq(struct dl_rq *dl_rq)
 357 {
 358         dl_rq->root = RB_ROOT_CACHED;
 359 
 360 #ifdef CONFIG_SMP
 361         /* zero means no -deadline tasks */
 362         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 363 
 364         dl_rq->dl_nr_migratory = 0;
 365         dl_rq->overloaded = 0;
 366         dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 367 #else
 368         init_dl_bw(&dl_rq->dl_bw);
 369 #endif
 370 
 371         dl_rq->running_bw = 0;
 372         dl_rq->this_bw = 0;
 373         init_dl_rq_bw_ratio(dl_rq);
 374 }
 375 
 376 #ifdef CONFIG_SMP
 377 
 378 static inline int dl_overloaded(struct rq *rq)
 379 {
 380         return atomic_read(&rq->rd->dlo_count);
 381 }
 382 
 383 static inline void dl_set_overload(struct rq *rq)
 384 {
 385         if (!rq->online)
 386                 return;
 387 
 388         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 389         /*
 390          * Must be visible before the overload count is
 391          * set (as in sched_rt.c).
 392          *
 393          * Matched by the barrier in pull_dl_task().
 394          */
 395         smp_wmb();
 396         atomic_inc(&rq->rd->dlo_count);
 397 }
 398 
 399 static inline void dl_clear_overload(struct rq *rq)
 400 {
 401         if (!rq->online)
 402                 return;
 403 
 404         atomic_dec(&rq->rd->dlo_count);
 405         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 406 }
 407 
 408 static void update_dl_migration(struct dl_rq *dl_rq)
 409 {
 410         if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 411                 if (!dl_rq->overloaded) {
 412                         dl_set_overload(rq_of_dl_rq(dl_rq));
 413                         dl_rq->overloaded = 1;
 414                 }
 415         } else if (dl_rq->overloaded) {
 416                 dl_clear_overload(rq_of_dl_rq(dl_rq));
 417                 dl_rq->overloaded = 0;
 418         }
 419 }
 420 
 421 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 422 {
 423         struct task_struct *p = dl_task_of(dl_se);
 424 
 425         if (p->nr_cpus_allowed > 1)
 426                 dl_rq->dl_nr_migratory++;
 427 
 428         update_dl_migration(dl_rq);
 429 }
 430 
 431 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 432 {
 433         struct task_struct *p = dl_task_of(dl_se);
 434 
 435         if (p->nr_cpus_allowed > 1)
 436                 dl_rq->dl_nr_migratory--;
 437 
 438         update_dl_migration(dl_rq);
 439 }
 440 
 441 /*
 442  * The list of pushable -deadline task is not a plist, like in
 443  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 444  */
 445 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 446 {
 447         struct dl_rq *dl_rq = &rq->dl;
 448         struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
 449         struct rb_node *parent = NULL;
 450         struct task_struct *entry;
 451         bool leftmost = true;
 452 
 453         BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 454 
 455         while (*link) {
 456                 parent = *link;
 457                 entry = rb_entry(parent, struct task_struct,
 458                                  pushable_dl_tasks);
 459                 if (dl_entity_preempt(&p->dl, &entry->dl))
 460                         link = &parent->rb_left;
 461                 else {
 462                         link = &parent->rb_right;
 463                         leftmost = false;
 464                 }
 465         }
 466 
 467         if (leftmost)
 468                 dl_rq->earliest_dl.next = p->dl.deadline;
 469 
 470         rb_link_node(&p->pushable_dl_tasks, parent, link);
 471         rb_insert_color_cached(&p->pushable_dl_tasks,
 472                                &dl_rq->pushable_dl_tasks_root, leftmost);
 473 }
 474 
 475 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 476 {
 477         struct dl_rq *dl_rq = &rq->dl;
 478 
 479         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 480                 return;
 481 
 482         if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
 483                 struct rb_node *next_node;
 484 
 485                 next_node = rb_next(&p->pushable_dl_tasks);
 486                 if (next_node) {
 487                         dl_rq->earliest_dl.next = rb_entry(next_node,
 488                                 struct task_struct, pushable_dl_tasks)->dl.deadline;
 489                 }
 490         }
 491 
 492         rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 493         RB_CLEAR_NODE(&p->pushable_dl_tasks);
 494 }
 495 
 496 static inline int has_pushable_dl_tasks(struct rq *rq)
 497 {
 498         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 499 }
 500 
 501 static int push_dl_task(struct rq *rq);
 502 
 503 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 504 {
 505         return dl_task(prev);
 506 }
 507 
 508 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 509 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 510 
 511 static void push_dl_tasks(struct rq *);
 512 static void pull_dl_task(struct rq *);
 513 
 514 static inline void deadline_queue_push_tasks(struct rq *rq)
 515 {
 516         if (!has_pushable_dl_tasks(rq))
 517                 return;
 518 
 519         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 520 }
 521 
 522 static inline void deadline_queue_pull_task(struct rq *rq)
 523 {
 524         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 525 }
 526 
 527 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 528 
 529 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 530 {
 531         struct rq *later_rq = NULL;
 532         struct dl_bw *dl_b;
 533 
 534         later_rq = find_lock_later_rq(p, rq);
 535         if (!later_rq) {
 536                 int cpu;
 537 
 538                 /*
 539                  * If we cannot preempt any rq, fall back to pick any
 540                  * online CPU:
 541                  */
 542                 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 543                 if (cpu >= nr_cpu_ids) {
 544                         /*
 545                          * Failed to find any suitable CPU.
 546                          * The task will never come back!
 547                          */
 548                         BUG_ON(dl_bandwidth_enabled());
 549 
 550                         /*
 551                          * If admission control is disabled we
 552                          * try a little harder to let the task
 553                          * run.
 554                          */
 555                         cpu = cpumask_any(cpu_active_mask);
 556                 }
 557                 later_rq = cpu_rq(cpu);
 558                 double_lock_balance(rq, later_rq);
 559         }
 560 
 561         if (p->dl.dl_non_contending || p->dl.dl_throttled) {
 562                 /*
 563                  * Inactive timer is armed (or callback is running, but
 564                  * waiting for us to release rq locks). In any case, when it
 565                  * will fire (or continue), it will see running_bw of this
 566                  * task migrated to later_rq (and correctly handle it).
 567                  */
 568                 sub_running_bw(&p->dl, &rq->dl);
 569                 sub_rq_bw(&p->dl, &rq->dl);
 570 
 571                 add_rq_bw(&p->dl, &later_rq->dl);
 572                 add_running_bw(&p->dl, &later_rq->dl);
 573         } else {
 574                 sub_rq_bw(&p->dl, &rq->dl);
 575                 add_rq_bw(&p->dl, &later_rq->dl);
 576         }
 577 
 578         /*
 579          * And we finally need to fixup root_domain(s) bandwidth accounting,
 580          * since p is still hanging out in the old (now moved to default) root
 581          * domain.
 582          */
 583         dl_b = &rq->rd->dl_bw;
 584         raw_spin_lock(&dl_b->lock);
 585         __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
 586         raw_spin_unlock(&dl_b->lock);
 587 
 588         dl_b = &later_rq->rd->dl_bw;
 589         raw_spin_lock(&dl_b->lock);
 590         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
 591         raw_spin_unlock(&dl_b->lock);
 592 
 593         set_task_cpu(p, later_rq->cpu);
 594         double_unlock_balance(later_rq, rq);
 595 
 596         return later_rq;
 597 }
 598 
 599 #else
 600 
 601 static inline
 602 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 603 {
 604 }
 605 
 606 static inline
 607 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 608 {
 609 }
 610 
 611 static inline
 612 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 613 {
 614 }
 615 
 616 static inline
 617 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 618 {
 619 }
 620 
 621 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 622 {
 623         return false;
 624 }
 625 
 626 static inline void pull_dl_task(struct rq *rq)
 627 {
 628 }
 629 
 630 static inline void deadline_queue_push_tasks(struct rq *rq)
 631 {
 632 }
 633 
 634 static inline void deadline_queue_pull_task(struct rq *rq)
 635 {
 636 }
 637 #endif /* CONFIG_SMP */
 638 
 639 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 640 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 641 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 642 
 643 /*
 644  * We are being explicitly informed that a new instance is starting,
 645  * and this means that:
 646  *  - the absolute deadline of the entity has to be placed at
 647  *    current time + relative deadline;
 648  *  - the runtime of the entity has to be set to the maximum value.
 649  *
 650  * The capability of specifying such an event is useful whenever a -deadline
 651  * entity wants to (try to!) synchronize its behaviour with the scheduler's
 652  * one, and to (try to!) reconcile itself with its own scheduling
 653  * parameters.
 654  */
 655 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 656 {
 657         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 658         struct rq *rq = rq_of_dl_rq(dl_rq);
 659 
 660         WARN_ON(dl_se->dl_boosted);
 661         WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 662 
 663         /*
 664          * We are racing with the deadline timer. So, do nothing because
 665          * the deadline timer handler will take care of properly recharging
 666          * the runtime and postponing the deadline
 667          */
 668         if (dl_se->dl_throttled)
 669                 return;
 670 
 671         /*
 672          * We use the regular wall clock time to set deadlines in the
 673          * future; in fact, we must consider execution overheads (time
 674          * spent on hardirq context, etc.).
 675          */
 676         dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 677         dl_se->runtime = dl_se->dl_runtime;
 678 }
 679 
 680 /*
 681  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 682  * possibility of an entity lasting more than what it declared, and thus
 683  * exhausting its runtime.
 684  *
 685  * Here we are interested in making runtime overrun possible, but we do
 686  * not want an entity which is misbehaving to affect the scheduling of all
 687  * other entities.
 688  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 689  * is used, in order to confine each entity within its own bandwidth.
 690  *
 691  * This function deals exactly with that, and ensures that when the runtime
 692  * of an entity is replenished, its deadline is also postponed. That ensures
 693  * the overrunning entity can't interfere with other entities in the system and
 694  * can't make them miss their deadlines. Typical reasons why this kind of
 695  * overrun could happen are an entity voluntarily trying to exceed its
 696  * runtime, or having underestimated it during sched_setattr().
 697  */
 698 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 699                                 struct sched_dl_entity *pi_se)
 700 {
 701         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 702         struct rq *rq = rq_of_dl_rq(dl_rq);
 703 
 704         BUG_ON(pi_se->dl_runtime <= 0);
 705 
 706         /*
 707          * This could be the case for a !-dl task that is boosted.
 708          * Just go with full inherited parameters.
 709          */
 710         if (dl_se->dl_deadline == 0) {
 711                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 712                 dl_se->runtime = pi_se->dl_runtime;
 713         }
 714 
 715         if (dl_se->dl_yielded && dl_se->runtime > 0)
 716                 dl_se->runtime = 0;
 717 
 718         /*
 719          * We keep moving the deadline away until we get some
 720          * available runtime for the entity. This ensures correct
 721          * handling of situations where the runtime overrun is
 722          * arbitrarily large.
 723          */
 724         while (dl_se->runtime <= 0) {
 725                 dl_se->deadline += pi_se->dl_period;
 726                 dl_se->runtime += pi_se->dl_runtime;
 727         }
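
        /*
         * Worked example (hypothetical numbers): with dl_runtime = 10ms,
         * dl_period = 100ms and runtime = -25ms after an overrun, the loop
         * above iterates three times, leaving runtime = 5ms and a deadline
         * postponed by 300ms.
         */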
 728 
 729         /*
 730          * At this point, the deadline really should be "in
 731          * the future" with respect to rq->clock. If it's
 732          * not, we are, for some reason, lagging too much!
 733          * Anyway, after having warned userspace about that,
 734          * we still try to keep things running by
 735          * resetting the deadline and the budget of the
 736          * entity.
 737          */
 738         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 739                 printk_deferred_once("sched: DL replenish lagged too much\n");
 740                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 741                 dl_se->runtime = pi_se->dl_runtime;
 742         }
 743 
 744         if (dl_se->dl_yielded)
 745                 dl_se->dl_yielded = 0;
 746         if (dl_se->dl_throttled)
 747                 dl_se->dl_throttled = 0;
 748 }
 749 
 750 /*
 751  * Here we check if --at time t-- an entity (which is probably being
 752  * [re]activated or, in general, enqueued) can use its remaining runtime
 753  * and its current deadline _without_ exceeding the bandwidth it is
 754  * assigned (function returns true if it can't). We are in fact applying
 755  * one of the CBS rules: when a task wakes up, if the residual runtime
 756  * over residual deadline fits within the allocated bandwidth, then we
 757  * can keep the current (absolute) deadline and residual budget without
 758  * disrupting the schedulability of the system. Otherwise, we should
 759  * refill the runtime and set the deadline a period in the future,
 760  * because keeping the current (absolute) deadline of the task would
 761  * result in breaking guarantees promised to other tasks (refer to
 762  * Documentation/scheduler/sched-deadline.rst for more information).
 763  *
 764  * This function returns true if:
 765  *
 766  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 767  *
 768  * IOW we can't recycle current parameters.
 769  *
 770  * Notice that the bandwidth check is done against the deadline. For a
 771  * task with deadline equal to period, this is the same as using
 772  * dl_period instead of dl_deadline in the equation above.
 773  */
 774 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 775                                struct sched_dl_entity *pi_se, u64 t)
 776 {
 777         u64 left, right;
 778 
 779         /*
 780          * left and right are the two sides of the equation above,
 781          * after a bit of shuffling to use multiplications instead
 782          * of divisions.
 783          *
 784          * Note that none of the time values involved in the two
 785          * multiplications are absolute: dl_deadline and dl_runtime
 786          * are the relative deadline and the maximum runtime of each
 787          * instance, runtime is the runtime left for the last instance
 788          * and (deadline - t), since t is rq->clock, is the time left
 789          * to the (absolute) deadline. Even if overflowing the u64 type
 790          * is very unlikely to occur in both cases, here we scale down
 791          * as we want to avoid that risk at all. Scaling down by 10
 792          * means that we reduce granularity to 1us. We are fine with it,
 793          * since this is only a true/false check and, anyway, thinking
 794          * of anything below microseconds resolution is actually fiction
 795          * (but still we want to give the user that illusion >;).
 796          */
 797         left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 798         right = ((dl_se->deadline - t) >> DL_SCALE) *
 799                 (pi_se->dl_runtime >> DL_SCALE);
 800 
 801         return dl_time_before(right, left);
 802 }
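
/*
 * Worked example for the check above (hypothetical numbers, ignoring the
 * DL_SCALE shift): a task with dl_runtime = 10ms and dl_deadline = 100ms
 * wakes up with 4ms of runtime left and 30ms to its absolute deadline.
 *
 *   left  = dl_deadline * runtime       = 100ms *  4ms = 400
 *   right = (deadline - t) * dl_runtime =  30ms * 10ms = 300
 *
 * Since right < left (i.e. 4/30 > 10/100), reusing the current parameters
 * would exceed the reserved bandwidth, so update_dl_entity() refills the
 * runtime and pushes the deadline instead.
 */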
 803 
 804 /*
 805  * Revised wakeup rule [1]: For self-suspending tasks, rather than
 806  * re-initializing the task's runtime and deadline, the revised wakeup
 807  * rule adjusts the task's runtime to avoid the task overrunning its
 808  * density.
 809  *
 810  * Reasoning: a task may overrun the density if:
 811  *    runtime / (deadline - t) > dl_runtime / dl_deadline
 812  *
 813  * Therefore, runtime can be adjusted to:
 814  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 815  *
 816  * In such a way that runtime will be equal to the maximum density
 817  * the task can use without breaking any rule.
 818  *
 819  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 820  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 821  */
 822 static void
 823 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 824 {
 825         u64 laxity = dl_se->deadline - rq_clock(rq);
 826 
 827         /*
 828          * If the task has deadline < period, and the deadline is in the past,
 829          * it should already be throttled before this check.
 830          *
 831          * See update_dl_entity() comments for further details.
 832          */
 833         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 834 
 835         dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 836 }
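
/*
 * Worked example (hypothetical numbers): a task with dl_runtime = 10ms and
 * dl_deadline = 100ms (density 0.1) wakes up 30ms before its absolute
 * deadline. The revised rule above sets runtime = 0.1 * 30ms = 3ms, the
 * most it can consume in the remaining laxity without exceeding its
 * density.
 */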
 837 
 838 /*
 839  * Regarding the deadline, a task with implicit deadline has a relative
 840  * deadline == relative period. A task with constrained deadline has a
 841  * relative deadline <= relative period.
 842  *
 843  * We support constrained deadline tasks. However, there are some restrictions
 844  * applied only for tasks which do not have an implicit deadline. See
 845  * update_dl_entity() to know more about such restrictions.
 846  *
 847  * The dl_is_implicit() returns true if the task has an implicit deadline.
 848  */
 849 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 850 {
 851         return dl_se->dl_deadline == dl_se->dl_period;
 852 }
 853 
 854 /*
 855  * When a deadline entity is placed in the runqueue, its runtime and deadline
 856  * might need to be updated. This is done by a CBS wake up rule. There are two
 857  * different rules: 1) the original CBS; and 2) the Revisited CBS.
 858  *
 859  * When the task is starting a new period, the Original CBS is used. In this
 860  * case, the runtime is replenished and a new absolute deadline is set.
 861  *
 862  * When a task is queued before the beginning of the next period, using the
 863  * remaining runtime and deadline could make the entity overflow, see
 864  * dl_entity_overflow() to find more about runtime overflow. When such case
 865  * is detected, the runtime and deadline need to be updated.
 866  *
 867  * If the task has an implicit deadline, i.e., deadline == period, the Original
 868  * CBS is applied: the runtime is replenished and a new absolute deadline is
 869  * set, as in the previous cases.
 870  *
 871  * However, the Original CBS does not work properly for tasks with
 872  * deadline < period, which are said to have a constrained deadline. By
 873  * applying the Original CBS, a constrained deadline task would be able to run
 874  * runtime/deadline in a period. With deadline < period, the task would
 875  * overrun the runtime/period allowed bandwidth, breaking the admission test.
 876  *
 877  * In order to prevent this misbehavior, the Revisited CBS is used for
 878  * constrained deadline tasks when a runtime overflow is detected. In the
 879  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 880  * the remaining runtime of the task is reduced to avoid runtime overflow.
 881  * Please refer to the comments in the update_dl_revised_wakeup() function to find
 882  * more about the Revised CBS rule.
 883  */
 884 static void update_dl_entity(struct sched_dl_entity *dl_se,
 885                              struct sched_dl_entity *pi_se)
 886 {
 887         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 888         struct rq *rq = rq_of_dl_rq(dl_rq);
 889 
 890         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 891             dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 892 
 893                 if (unlikely(!dl_is_implicit(dl_se) &&
 894                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 895                              !dl_se->dl_boosted)){
 896                         update_dl_revised_wakeup(dl_se, rq);
 897                         return;
 898                 }
 899 
 900                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 901                 dl_se->runtime = pi_se->dl_runtime;
 902         }
 903 }
 904 
 905 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 906 {
 907         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 908 }
 909 
 910 /*
 911  * If the entity depleted all its runtime, and if we want it to sleep
 912  * while waiting for some new execution time to become available, we
 913  * set the bandwidth replenishment timer to the replenishment instant
 914  * and try to activate it.
 915  *
 916  * Notice that it is important for the caller to know if the timer
 917  * actually started or not (i.e., the replenishment instant is in
 918  * the future or in the past).
 919  */
 920 static int start_dl_timer(struct task_struct *p)
 921 {
 922         struct sched_dl_entity *dl_se = &p->dl;
 923         struct hrtimer *timer = &dl_se->dl_timer;
 924         struct rq *rq = task_rq(p);
 925         ktime_t now, act;
 926         s64 delta;
 927 
 928         lockdep_assert_held(&rq->lock);
 929 
 930         /*
 931          * We want the timer to fire at the deadline, but considering
 932          * that it is actually coming from rq->clock and not from
 933          * hrtimer's time base reading.
 934          */
 935         act = ns_to_ktime(dl_next_period(dl_se));
 936         now = hrtimer_cb_get_time(timer);
 937         delta = ktime_to_ns(now) - rq_clock(rq);
 938         act = ktime_add_ns(act, delta);
 939 
 940         /*
 941          * If the expiry time already passed, e.g., because the value
 942          * chosen as the deadline is too small, don't even try to
 943          * start the timer in the past!
 944          */
 945         if (ktime_us_delta(act, now) < 0)
 946                 return 0;
 947 
 948         /*
 949          * !enqueued will guarantee another callback; even if one is already in
 950          * progress. This ensures a balanced {get,put}_task_struct().
 951          *
 952          * The race against __run_timer() clearing the enqueued state is
 953          * harmless because we're holding task_rq()->lock, therefore the timer
 954          * expiring after we've done the check will wait on its task_rq_lock()
 955          * and observe our state.
 956          */
 957         if (!hrtimer_is_queued(timer)) {
 958                 get_task_struct(p);
 959                 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
 960         }
 961 
 962         return 1;
 963 }
 964 
 965 /*
 966  * This is the bandwidth enforcement timer callback. If here, we know
 967  * a task is not on its dl_rq, since the fact that the timer was running
 968  * means the task is throttled and needs a runtime replenishment.
 969  *
 970  * However, what we actually do depends on whether the task is active
 971  * (it is on its rq) or has been removed from there by a call to
 972  * dequeue_task_dl(). In the former case we must issue the runtime
 973  * replenishment and add the task back to the dl_rq; in the latter, we just
 974  * do nothing but clear dl_throttled, so that runtime and deadline
 975  * updating (and the queueing back to dl_rq) will be done by the
 976  * next call to enqueue_task_dl().
 977  */
 978 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 979 {
 980         struct sched_dl_entity *dl_se = container_of(timer,
 981                                                      struct sched_dl_entity,
 982                                                      dl_timer);
 983         struct task_struct *p = dl_task_of(dl_se);
 984         struct rq_flags rf;
 985         struct rq *rq;
 986 
 987         rq = task_rq_lock(p, &rf);
 988 
 989         /*
 990          * The task might have changed its scheduling policy to something
 991          * different than SCHED_DEADLINE (through switched_from_dl()).
 992          */
 993         if (!dl_task(p))
 994                 goto unlock;
 995 
 996         /*
 997          * The task might have been boosted by someone else and might be in the
 998          * boosting/deboosting path, it's not throttled.
 999          */
1000         if (dl_se->dl_boosted)
1001                 goto unlock;
1002 
1003         /*
1004          * Spurious timer due to start_dl_timer() race; or we already received
1005          * a replenishment from rt_mutex_setprio().
1006          */
1007         if (!dl_se->dl_throttled)
1008                 goto unlock;
1009 
1010         sched_clock_tick();
1011         update_rq_clock(rq);
1012 
1013         /*
1014          * If the throttle happened during sched-out; like:
1015          *
1016          *   schedule()
1017          *     deactivate_task()
1018          *       dequeue_task_dl()
1019          *         update_curr_dl()
1020          *           start_dl_timer()
1021          *         __dequeue_task_dl()
1022          *     prev->on_rq = 0;
1023          *
1024          * We can be both throttled and !queued. Replenish the counter
1025          * but do not enqueue -- wait for our wakeup to do that.
1026          */
1027         if (!task_on_rq_queued(p)) {
1028                 replenish_dl_entity(dl_se, dl_se);
1029                 goto unlock;
1030         }
1031 
1032 #ifdef CONFIG_SMP
1033         if (unlikely(!rq->online)) {
1034                 /*
1035                  * If the runqueue is no longer available, migrate the
1036                  * task elsewhere. This necessarily changes rq.
1037                  */
1038                 lockdep_unpin_lock(&rq->lock, rf.cookie);
1039                 rq = dl_task_offline_migration(rq, p);
1040                 rf.cookie = lockdep_pin_lock(&rq->lock);
1041                 update_rq_clock(rq);
1042 
1043                 /*
1044                  * Now that the task has been migrated to the new RQ and we
1045                  * have that locked, proceed as normal and enqueue the task
1046                  * there.
1047                  */
1048         }
1049 #endif
1050 
1051         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1052         if (dl_task(rq->curr))
1053                 check_preempt_curr_dl(rq, p, 0);
1054         else
1055                 resched_curr(rq);
1056 
1057 #ifdef CONFIG_SMP
1058         /*
1059          * Queueing this task back might have overloaded rq, check if we need
1060          * to kick someone away.
1061          */
1062         if (has_pushable_dl_tasks(rq)) {
1063                 /*
 1064                  * Nothing relies on rq->lock after this, so it's safe to drop
1065                  * rq->lock.
1066                  */
1067                 rq_unpin_lock(rq, &rf);
1068                 push_dl_task(rq);
1069                 rq_repin_lock(rq, &rf);
1070         }
1071 #endif
1072 
1073 unlock:
1074         task_rq_unlock(rq, p, &rf);
1075 
1076         /*
1077          * This can free the task_struct, including this hrtimer, do not touch
1078          * anything related to that after this.
1079          */
1080         put_task_struct(p);
1081 
1082         return HRTIMER_NORESTART;
1083 }
1084 
1085 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1086 {
1087         struct hrtimer *timer = &dl_se->dl_timer;
1088 
1089         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1090         timer->function = dl_task_timer;
1091 }
1092 
1093 /*
1094  * During the activation, CBS checks if it can reuse the current task's
1095  * runtime and period. If the deadline of the task is in the past, CBS
1096  * cannot use the runtime, and so it replenishes the task. This rule
1097  * works fine for implicit deadline tasks (deadline == period), and the
1098  * CBS was designed for implicit deadline tasks. However, a task with
 1099  * constrained deadline (deadline < period) might be awakened after the
1100  * deadline, but before the next period. In this case, replenishing the
1101  * task would allow it to run for runtime / deadline. As in this case
1102  * deadline < period, CBS enables a task to run for more than the
1103  * runtime / period. In a very loaded system, this can cause a domino
1104  * effect, making other tasks miss their deadlines.
1105  *
1106  * To avoid this problem, in the activation of a constrained deadline
1107  * task after the deadline but before the next period, throttle the
 1108  * task and set the replenishing timer to the beginning of the next period,
1109  * unless it is boosted.
1110  */
1111 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1112 {
1113         struct task_struct *p = dl_task_of(dl_se);
1114         struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1115 
1116         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1117             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1118                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1119                         return;
1120                 dl_se->dl_throttled = 1;
1121                 if (dl_se->runtime > 0)
1122                         dl_se->runtime = 0;
1123         }
1124 }
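
/*
 * Timeline example (hypothetical numbers): a constrained task with
 * dl_deadline = 50ms and dl_period = 100ms has its current absolute
 * deadline at t = 150ms, so the next period starts at
 * dl_next_period() = 150ms - 50ms + 100ms = 200ms. A wakeup at t = 170ms
 * falls after the deadline but before the next period: the task is
 * throttled, any leftover runtime is discarded and the replenishment
 * timer is armed for t = 200ms (unless the task is boosted).
 */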
1125 
1126 static
1127 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1128 {
1129         return (dl_se->runtime <= 0);
1130 }
1131 
1132 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1133 
1134 /*
1135  * This function implements the GRUB accounting rule:
1136  * according to the GRUB reclaiming algorithm, the runtime is
1137  * not decreased as "dq = -dt", but as
1138  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1139  * where u is the utilization of the task, Umax is the maximum reclaimable
1140  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1141  * as the difference between the "total runqueue utilization" and the
1142  * runqueue active utilization, and Uextra is the (per runqueue) extra
1143  * reclaimable utilization.
1144  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1145  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1146  * BW_SHIFT.
 1147  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 1148  * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1149  * Since delta is a 64 bit variable, to have an overflow its value
1150  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
1151  * So, overflow is not an issue here.
1152  */
1153 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1154 {
1155         u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
1156         u64 u_act;
1157         u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1158 
1159         /*
1160          * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1161          * we compare u_inact + rq->dl.extra_bw with
1162          * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1163          * u_inact + rq->dl.extra_bw can be larger than
 1164          * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
 1165          * leading to wrong results).
1166          */
1167         if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1168                 u_act = u_act_min;
1169         else
1170                 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1171 
1172         return (delta * u_act) >> BW_SHIFT;
1173 }
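
/*
 * Worked example (hypothetical numbers): a reclaiming task with bandwidth
 * u = 0.25 on a runqueue where Umax = 0.95 (so u_act_min = 0.25 / 0.95 ~= 0.26),
 * with u_inact = 0.10 and extra_bw = 0.05. Since 0.10 + 0.05 does not
 * exceed 1 - 0.26, u_act = 1 - 0.10 - 0.05 = 0.85, and a 1ms tick of real
 * time consumes 0.85ms of the task's runtime instead of the full 1ms.
 */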
1174 
1175 /*
1176  * Update the current task's runtime statistics (provided it is still
1177  * a -deadline task and has not been removed from the dl_rq).
1178  */
1179 static void update_curr_dl(struct rq *rq)
1180 {
1181         struct task_struct *curr = rq->curr;
1182         struct sched_dl_entity *dl_se = &curr->dl;
1183         u64 delta_exec, scaled_delta_exec;
1184         int cpu = cpu_of(rq);
1185         u64 now;
1186 
1187         if (!dl_task(curr) || !on_dl_rq(dl_se))
1188                 return;
1189 
1190         /*
1191          * Consumed budget is computed considering the time as
1192          * observed by schedulable tasks (excluding time spent
1193          * in hardirq context, etc.). Deadlines are instead
1194          * computed using hard walltime. This seems to be the more
1195          * natural solution, but the full ramifications of this
1196          * approach need further study.
1197          */
1198         now = rq_clock_task(rq);
1199         delta_exec = now - curr->se.exec_start;
1200         if (unlikely((s64)delta_exec <= 0)) {
1201                 if (unlikely(dl_se->dl_yielded))
1202                         goto throttle;
1203                 return;
1204         }
1205 
1206         schedstat_set(curr->se.statistics.exec_max,
1207                       max(curr->se.statistics.exec_max, delta_exec));
1208 
1209         curr->se.sum_exec_runtime += delta_exec;
1210         account_group_exec_runtime(curr, delta_exec);
1211 
1212         curr->se.exec_start = now;
1213         cgroup_account_cputime(curr, delta_exec);
1214 
1215         if (dl_entity_is_special(dl_se))
1216                 return;
1217 
1218         /*
1219          * For tasks that participate in GRUB, we implement GRUB-PA: the
1220          * spare reclaimed bandwidth is used to clock down frequency.
1221          *
1222          * For the others, we still need to scale reservation parameters
1223          * according to current frequency and CPU maximum capacity.
1224          */
1225         if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1226                 scaled_delta_exec = grub_reclaim(delta_exec,
1227                                                  rq,
1228                                                  &curr->dl);
1229         } else {
1230                 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1231                 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1232 
1233                 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1234                 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1235         }
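
        /*
         * For example (hypothetical numbers): with the CPU running at half
         * of its maximum frequency (scale_freq = 512) on a core with half
         * of the maximum capacity (scale_cpu = 512), a 1ms wall-clock delta
         * charges only 1ms * 512/1024 * 512/1024 = 0.25ms against the
         * reserved runtime.
         */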
1236 
1237         dl_se->runtime -= scaled_delta_exec;
1238 
1239 throttle:
1240         if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1241                 dl_se->dl_throttled = 1;
1242 
1243                 /* If requested, inform the user about runtime overruns. */
1244                 if (dl_runtime_exceeded(dl_se) &&
1245                     (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1246                         dl_se->dl_overrun = 1;
1247 
1248                 __dequeue_task_dl(rq, curr, 0);
1249                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1250                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1251 
1252                 if (!is_leftmost(curr, &rq->dl))
1253                         resched_curr(rq);
1254         }
1255 
1256         /*
1257          * Because -- for now -- we share the rt bandwidth, we need to
1258          * account our runtime there too, otherwise actual rt tasks
1259          * would be able to exceed the shared quota.
1260          *
1261          * Account to the root rt group for now.
1262          *
1263          * The solution we're working towards is having the RT groups scheduled
1264          * using deadline servers -- however there's a few nasties to figure
1265          * out before that can happen.
1266          */
1267         if (rt_bandwidth_enabled()) {
1268                 struct rt_rq *rt_rq = &rq->rt;
1269 
1270                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1271                 /*
1272                  * We'll let actual RT tasks worry about the overflow here, we
1273                  * have our own CBS to keep us inline; only account when RT
1274                  * bandwidth is relevant.
1275                  */
1276                 if (sched_rt_bandwidth_account(rt_rq))
1277                         rt_rq->rt_time += delta_exec;
1278                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1279         }
1280 }
1281 
1282 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1283 {
1284         struct sched_dl_entity *dl_se = container_of(timer,
1285                                                      struct sched_dl_entity,
1286                                                      inactive_timer);
1287         struct task_struct *p = dl_task_of(dl_se);
1288         struct rq_flags rf;
1289         struct rq *rq;
1290 
1291         rq = task_rq_lock(p, &rf);
1292 
1293         sched_clock_tick();
1294         update_rq_clock(rq);
1295 
1296         if (!dl_task(p) || p->state == TASK_DEAD) {
1297                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1298 
1299                 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1300                         sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1301                         sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1302                         dl_se->dl_non_contending = 0;
1303                 }
1304 
1305                 raw_spin_lock(&dl_b->lock);
1306                 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1307                 raw_spin_unlock(&dl_b->lock);
1308                 __dl_clear_params(p);
1309 
1310                 goto unlock;
1311         }
1312         if (dl_se->dl_non_contending == 0)
1313                 goto unlock;
1314 
1315         sub_running_bw(dl_se, &rq->dl);
1316         dl_se->dl_non_contending = 0;
1317 unlock:
1318         task_rq_unlock(rq, p, &rf);
1319         put_task_struct(p);
1320 
1321         return HRTIMER_NORESTART;
1322 }
1323 
1324 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1325 {
1326         struct hrtimer *timer = &dl_se->inactive_timer;
1327 
1328         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1329         timer->function = inactive_task_timer;
1330 }
1331 
1332 #ifdef CONFIG_SMP
1333 
1334 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1335 {
1336         struct rq *rq = rq_of_dl_rq(dl_rq);
1337 
1338         if (dl_rq->earliest_dl.curr == 0 ||
1339             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1340                 dl_rq->earliest_dl.curr = deadline;
1341                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1342         }
1343 }
1344 
1345 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1346 {
1347         struct rq *rq = rq_of_dl_rq(dl_rq);
1348 
1349         /*
1350          * Since we may have removed our earliest (and/or next earliest)
1351          * task we must recompute them.
1352          */
1353         if (!dl_rq->dl_nr_running) {
1354                 dl_rq->earliest_dl.curr = 0;
1355                 dl_rq->earliest_dl.next = 0;
1356                 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1357         } else {
1358                 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1359                 struct sched_dl_entity *entry;
1360 
1361                 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1362                 dl_rq->earliest_dl.curr = entry->deadline;
1363                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1364         }
1365 }
1366 
1367 #else
1368 
1369 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1370 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1371 
1372 #endif /* CONFIG_SMP */
1373 
1374 static inline
1375 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1376 {
1377         int prio = dl_task_of(dl_se)->prio;
1378         u64 deadline = dl_se->deadline;
1379 
1380         WARN_ON(!dl_prio(prio));
1381         dl_rq->dl_nr_running++;
1382         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1383 
1384         inc_dl_deadline(dl_rq, deadline);
1385         inc_dl_migration(dl_se, dl_rq);
1386 }
1387 
1388 static inline
1389 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1390 {
1391         int prio = dl_task_of(dl_se)->prio;
1392 
1393         WARN_ON(!dl_prio(prio));
1394         WARN_ON(!dl_rq->dl_nr_running);
1395         dl_rq->dl_nr_running--;
1396         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1397 
1398         dec_dl_deadline(dl_rq, dl_se->deadline);
1399         dec_dl_migration(dl_se, dl_rq);
1400 }
1401 
1402 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1403 {
1404         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1405         struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1406         struct rb_node *parent = NULL;
1407         struct sched_dl_entity *entry;
1408         int leftmost = 1;
1409 
1410         BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1411 
1412         while (*link) {
1413                 parent = *link;
1414                 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1415                 if (dl_time_before(dl_se->deadline, entry->deadline))
1416                         link = &parent->rb_left;
1417                 else {
1418                         link = &parent->rb_right;
1419                         leftmost = 0;
1420                 }
1421         }
1422 
1423         rb_link_node(&dl_se->rb_node, parent, link);
1424         rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1425 
1426         inc_dl_tasks(dl_se, dl_rq);
1427 }
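
     /*
      * Illustration only (not part of the algorithm): with three enqueued
      * entities whose absolute deadlines are 150ms, 120ms and 300ms, the
      * cached rbtree ends up ordered by deadline with rb_leftmost pointing
      * at the 120ms entity, which is exactly what pick_next_dl_entity()
      * returns: plain EDF. Equal deadlines go to the right, so entities
      * with the same deadline are served in FIFO order.
      */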
1428 
1429 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1430 {
1431         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1432 
1433         if (RB_EMPTY_NODE(&dl_se->rb_node))
1434                 return;
1435 
1436         rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1437         RB_CLEAR_NODE(&dl_se->rb_node);
1438 
1439         dec_dl_tasks(dl_se, dl_rq);
1440 }
1441 
1442 static void
1443 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1444                   struct sched_dl_entity *pi_se, int flags)
1445 {
1446         BUG_ON(on_dl_rq(dl_se));
1447 
1448         /*
1449          * If this is a wakeup or a new instance, the scheduling
1450          * parameters of the task might need updating. Otherwise,
1451          * we want a replenishment of its runtime.
1452          */
1453         if (flags & ENQUEUE_WAKEUP) {
1454                 task_contending(dl_se, flags);
1455                 update_dl_entity(dl_se, pi_se);
1456         } else if (flags & ENQUEUE_REPLENISH) {
1457                 replenish_dl_entity(dl_se, pi_se);
1458         } else if ((flags & ENQUEUE_RESTORE) &&
1459                   dl_time_before(dl_se->deadline,
1460                                  rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1461                 setup_new_dl_entity(dl_se);
1462         }
1463 
1464         __enqueue_dl_entity(dl_se);
1465 }
1466 
1467 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1468 {
1469         __dequeue_dl_entity(dl_se);
1470 }
1471 
1472 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1473 {
1474         struct task_struct *pi_task = rt_mutex_get_top_task(p);
1475         struct sched_dl_entity *pi_se = &p->dl;
1476 
1477         /*
1478          * Use the scheduling parameters of the top pi-waiter task if:
1479          * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1480          * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1481          *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1482          *   boosted due to a SCHED_DEADLINE pi-waiter).
1483          * Otherwise we keep our runtime and deadline.
1484          */
1485         if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1486                 pi_se = &pi_task->dl;
1487         } else if (!dl_prio(p->normal_prio)) {
1488                 /*
1489                  * Special case in which we have a !SCHED_DEADLINE task
1490                  * that is going to be deboosted, but exceeds its
1491                  * runtime while doing so. No point in replenishing
1492                  * it, as it's going to return to its original
1493                  * scheduling class after this.
1494                  */
1495                 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1496                 return;
1497         }
1498 
1499         /*
1500          * Check if a constrained deadline task was activated
1501          * after the deadline but before the next period.
1502          * If that is the case, the task will be throttled and
1503          * the replenishment timer will be set to the next period.
1504          */
1505         if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1506                 dl_check_constrained_dl(&p->dl);
1507 
1508         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1509                 add_rq_bw(&p->dl, &rq->dl);
1510                 add_running_bw(&p->dl, &rq->dl);
1511         }
1512 
1513         /*
1514          * If p is throttled, we do not enqueue it. In fact, if it exhausted
1515          * its budget it needs a replenishment and, since it now is on
1516          * its rq, the bandwidth timer callback (which clearly has not
1517          * run yet) will take care of this.
1518          * However, the active utilization does not depend on the fact
1519          * that the task is on the runqueue or not (but depends on the
1520          * task's state - in GRUB parlance, "inactive" vs "active contending").
1521          * In other words, even if a task is throttled its utilization must
1522          * be counted in the active utilization; hence, we need to call
1523          * add_running_bw().
1524          */
1525         if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1526                 if (flags & ENQUEUE_WAKEUP)
1527                         task_contending(&p->dl, flags);
1528 
1529                 return;
1530         }
1531 
1532         enqueue_dl_entity(&p->dl, pi_se, flags);
1533 
1534         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1535                 enqueue_pushable_dl_task(rq, p);
1536 }
1537 
1538 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1539 {
1540         dequeue_dl_entity(&p->dl);
1541         dequeue_pushable_dl_task(rq, p);
1542 }
1543 
1544 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1545 {
1546         update_curr_dl(rq);
1547         __dequeue_task_dl(rq, p, flags);
1548 
1549         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1550                 sub_running_bw(&p->dl, &rq->dl);
1551                 sub_rq_bw(&p->dl, &rq->dl);
1552         }
1553 
1554         /*
1555          * This check allows us to start the inactive timer (or to immediately
1556          * decrease the active utilization, if needed) in two cases:
1557          * when the task blocks and when it is terminating
1558          * (p->state == TASK_DEAD). We can handle the two cases in the same
1559          * way, because from GRUB's point of view the same thing is happening
1560          * (the task moves from "active contending" to "active non contending"
1561          * or "inactive")
1562          */
1563         if (flags & DEQUEUE_SLEEP)
1564                 task_non_contending(p);
1565 }
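
     /*
      * Worked example of the 0-lag rule used by task_non_contending()
      * (invoked just above; the sketch assumes its
      * zerolag = deadline - runtime * dl_period / dl_runtime formula):
      * a task with dl_runtime = 10ms and dl_period = 100ms that blocks with
      * 2ms of runtime left and 30ms to its deadline has
      *
      *     0-lag = deadline - (2ms * 100ms / 10ms) = deadline - 20ms,
      *
      * i.e. 10ms in the future: the inactive timer is armed and the task
      * stays "active non contending" until it fires (or the task wakes up).
      */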
1566 
1567 /*
1568  * Yield task semantic for -deadline tasks is:
1569  *
1570  *   get off from the CPU until our next instance, with
1571  *   a new runtime. This is of little use now, since we
1572  *   don't have a bandwidth reclaiming mechanism. Anyway,
1573  *   bandwidth reclaiming is planned for the future, and
1574  *   yield_task_dl will indicate that some spare budget
1575  *   is available for other task instances to use.
1576  */
1577 static void yield_task_dl(struct rq *rq)
1578 {
1579         /*
1580          * We make the task go to sleep until its current deadline by
1581          * forcing its runtime to zero. This way, update_curr_dl() stops
1582          * it and the bandwidth timer will wake it up and will give it
1583          * new scheduling parameters (thanks to dl_yielded=1).
1584          */
1585         rq->curr->dl.dl_yielded = 1;
1586 
1587         update_rq_clock(rq);
1588         update_curr_dl(rq);
1589         /*
1590          * Tell update_rq_clock() that we've just updated,
1591          * so we don't do microscopic update in schedule()
1592          * and double the fastpath cost.
1593          */
1594         rq_clock_skip_update(rq);
1595 }
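
     /*
      * For illustration, the (hypothetical) userspace counterpart of this
      * hook: a periodic SCHED_DEADLINE job that finishes its instance early
      * and gives the leftover budget back until the next period:
      *
      *     for (;;) {
      *             do_one_instance();      // application work, made-up name
      *             sched_yield();          // lands here via yield_task_dl()
      *     }
      *
      * After the yield the task is throttled; the replenishment timer wakes
      * it at dl_next_period() with a fresh runtime and deadline.
      */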
1596 
1597 #ifdef CONFIG_SMP
1598 
1599 static int find_later_rq(struct task_struct *task);
1600 
1601 static int
1602 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1603 {
1604         struct task_struct *curr;
1605         struct rq *rq;
1606 
1607         if (sd_flag != SD_BALANCE_WAKE)
1608                 goto out;
1609 
1610         rq = cpu_rq(cpu);
1611 
1612         rcu_read_lock();
1613         curr = READ_ONCE(rq->curr); /* unlocked access */
1614 
1615         /*
1616          * If we are dealing with a -deadline task, we must
1617          * decide where to wake it up.
1618          * If it has a later deadline and the current task
1619          * on this rq can't move (provided the waking task
1620          * can!) we prefer to send it somewhere else. On the
1621          * other hand, if it has a shorter deadline, we
1622          * try to make it stay here, it might be important.
1623          */
1624         if (unlikely(dl_task(curr)) &&
1625             (curr->nr_cpus_allowed < 2 ||
1626              !dl_entity_preempt(&p->dl, &curr->dl)) &&
1627             (p->nr_cpus_allowed > 1)) {
1628                 int target = find_later_rq(p);
1629 
1630                 if (target != -1 &&
1631                                 (dl_time_before(p->dl.deadline,
1632                                         cpu_rq(target)->dl.earliest_dl.curr) ||
1633                                 (cpu_rq(target)->dl.dl_nr_running == 0)))
1634                         cpu = target;
1635         }
1636         rcu_read_unlock();
1637 
1638 out:
1639         return cpu;
1640 }
1641 
1642 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1643 {
1644         struct rq *rq;
1645 
1646         if (p->state != TASK_WAKING)
1647                 return;
1648 
1649         rq = task_rq(p);
1650         /*
1651          * Since p->state == TASK_WAKING, set_task_cpu() has been called
1652          * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1653          * rq->lock is not... So, lock it
1654          */
1655         raw_spin_lock(&rq->lock);
1656         if (p->dl.dl_non_contending) {
1657                 sub_running_bw(&p->dl, &rq->dl);
1658                 p->dl.dl_non_contending = 0;
1659                 /*
1660                  * If the timer handler is currently running and the
1661                  * timer cannot be cancelled, inactive_task_timer()
1662                  * will see that dl_non_contending is not set, and
1663                  * will not touch the rq's active utilization,
1664                  * so we are still safe.
1665                  */
1666                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1667                         put_task_struct(p);
1668         }
1669         sub_rq_bw(&p->dl, &rq->dl);
1670         raw_spin_unlock(&rq->lock);
1671 }
1672 
1673 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1674 {
1675         /*
1676          * Current can't be migrated, useless to reschedule,
1677          * let's hope p can move out.
1678          */
1679         if (rq->curr->nr_cpus_allowed == 1 ||
1680             !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1681                 return;
1682 
1683         /*
1684          * p is migratable, so let's not schedule it and
1685          * see if it is pushed or pulled somewhere else.
1686          */
1687         if (p->nr_cpus_allowed != 1 &&
1688             cpudl_find(&rq->rd->cpudl, p, NULL))
1689                 return;
1690 
1691         resched_curr(rq);
1692 }
1693 
1694 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695 {
1696         if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697                 /*
1698                  * This is OK, because current is on_cpu, which avoids it being
1699                  * picked for load-balance and preemption/IRQs are still
1700                  * disabled avoiding further scheduler activity on it and we've
1701                  * not yet started the picking loop.
1702                  */
1703                 rq_unpin_lock(rq, rf);
1704                 pull_dl_task(rq);
1705                 rq_repin_lock(rq, rf);
1706         }
1707 
1708         return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709 }
1710 #endif /* CONFIG_SMP */
1711 
1712 /*
1713  * Only called when both the current and waking task are -deadline
1714  * tasks.
1715  */
1716 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1717                                   int flags)
1718 {
1719         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1720                 resched_curr(rq);
1721                 return;
1722         }
1723 
1724 #ifdef CONFIG_SMP
1725         /*
1726          * In the unlikely case current and p have the same deadline
1727          * let us try to decide what's the best thing to do...
1728          */
1729         if ((p->dl.deadline == rq->curr->dl.deadline) &&
1730             !test_tsk_need_resched(rq->curr))
1731                 check_preempt_equal_dl(rq, p);
1732 #endif /* CONFIG_SMP */
1733 }
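
     /*
      * Note: dl_entity_preempt(a, b) (kernel/sched/sched.h) essentially
      * means "a's absolute deadline is earlier than b's" (special
      * SCHED_FLAG_SUGOV entities always preempt). So with rq->curr due at
      * t + 20ms and the waking task due at t + 5ms, the test above fires;
      * equal deadlines fall through to check_preempt_equal_dl() on SMP.
      */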
1734 
1735 #ifdef CONFIG_SCHED_HRTICK
1736 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1737 {
1738         hrtick_start(rq, p->dl.runtime);
1739 }
1740 #else /* !CONFIG_SCHED_HRTICK */
1741 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1742 {
1743 }
1744 #endif
1745 
1746 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1747 {
1748         p->se.exec_start = rq_clock_task(rq);
1749 
1750         /* You can't push away the running task */
1751         dequeue_pushable_dl_task(rq, p);
1752 
1753         if (!first)
1754                 return;
1755 
1756         if (hrtick_enabled(rq))
1757                 start_hrtick_dl(rq, p);
1758 
1759         if (rq->curr->sched_class != &dl_sched_class)
1760                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1761 
1762         deadline_queue_push_tasks(rq);
1763 }
1764 
1765 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1766                                                    struct dl_rq *dl_rq)
1767 {
1768         struct rb_node *left = rb_first_cached(&dl_rq->root);
1769 
1770         if (!left)
1771                 return NULL;
1772 
1773         return rb_entry(left, struct sched_dl_entity, rb_node);
1774 }
1775 
1776 static struct task_struct *
1777 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1778 {
1779         struct sched_dl_entity *dl_se;
1780         struct dl_rq *dl_rq = &rq->dl;
1781         struct task_struct *p;
1782 
1783         WARN_ON_ONCE(prev || rf);
1784 
1785         if (!sched_dl_runnable(rq))
1786                 return NULL;
1787 
1788         dl_se = pick_next_dl_entity(rq, dl_rq);
1789         BUG_ON(!dl_se);
1790         p = dl_task_of(dl_se);
1791         set_next_task_dl(rq, p, true);
1792         return p;
1793 }
1794 
1795 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1796 {
1797         update_curr_dl(rq);
1798 
1799         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1800         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1801                 enqueue_pushable_dl_task(rq, p);
1802 }
1803 
1804 /*
1805  * scheduler tick hitting a task of our scheduling class.
1806  *
1807  * NOTE: This function can be called remotely by the tick offload that
1808  * goes along full dynticks. Therefore no local assumption can be made
1809  * and everything must be accessed through the @rq and @curr passed in
1810  * parameters.
1811  */
1812 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1813 {
1814         update_curr_dl(rq);
1815 
1816         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1817         /*
1818          * Even when we have runtime, update_curr_dl() might have resulted in us
1819          * not being the leftmost task anymore. In that case NEED_RESCHED will
1820          * be set and schedule() will start a new hrtick for the next task.
1821          */
1822         if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1823             is_leftmost(p, &rq->dl))
1824                 start_hrtick_dl(rq, p);
1825 }
1826 
1827 static void task_fork_dl(struct task_struct *p)
1828 {
1829         /*
1830          * SCHED_DEADLINE tasks cannot fork and this is achieved through
1831          * sched_fork()
1832          */
1833 }
1834 
1835 #ifdef CONFIG_SMP
1836 
1837 /* Only try algorithms three times */
1838 #define DL_MAX_TRIES 3
1839 
1840 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1841 {
1842         if (!task_running(rq, p) &&
1843             cpumask_test_cpu(cpu, p->cpus_ptr))
1844                 return 1;
1845         return 0;
1846 }
1847 
1848 /*
1849  * Return the earliest pushable rq's task, which is suitable to be executed
1850  * on the CPU, NULL otherwise:
1851  */
1852 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1853 {
1854         struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1855         struct task_struct *p = NULL;
1856 
1857         if (!has_pushable_dl_tasks(rq))
1858                 return NULL;
1859 
1860 next_node:
1861         if (next_node) {
1862                 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1863 
1864                 if (pick_dl_task(rq, p, cpu))
1865                         return p;
1866 
1867                 next_node = rb_next(next_node);
1868                 goto next_node;
1869         }
1870 
1871         return NULL;
1872 }
1873 
1874 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1875 
1876 static int find_later_rq(struct task_struct *task)
1877 {
1878         struct sched_domain *sd;
1879         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1880         int this_cpu = smp_processor_id();
1881         int cpu = task_cpu(task);
1882 
1883         /* Make sure the mask is initialized first */
1884         if (unlikely(!later_mask))
1885                 return -1;
1886 
1887         if (task->nr_cpus_allowed == 1)
1888                 return -1;
1889 
1890         /*
1891          * We have to consider system topology and task affinity
1892          * first, then we can look for a suitable CPU.
1893          */
1894         if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1895                 return -1;
1896 
1897         /*
1898          * If we are here, some targets have been found, including
1899          * the most suitable which is, among the runqueues where the
1900          * current tasks have later deadlines than the task's one, the
1901          * rq with the latest possible one.
1902          *
1903          * Now we check how well this matches with task's
1904          * affinity and system topology.
1905          *
1906          * The last CPU where the task ran is our first
1907          * guess, since it is most likely cache-hot there.
1908          */
1909         if (cpumask_test_cpu(cpu, later_mask))
1910                 return cpu;
1911         /*
1912          * Check if this_cpu is to be skipped (i.e., it is
1913          * not in the mask) or not.
1914          */
1915         if (!cpumask_test_cpu(this_cpu, later_mask))
1916                 this_cpu = -1;
1917 
1918         rcu_read_lock();
1919         for_each_domain(cpu, sd) {
1920                 if (sd->flags & SD_WAKE_AFFINE) {
1921                         int best_cpu;
1922 
1923                         /*
1924                          * If possible, preempting this_cpu is
1925                          * cheaper than migrating.
1926                          */
1927                         if (this_cpu != -1 &&
1928                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1929                                 rcu_read_unlock();
1930                                 return this_cpu;
1931                         }
1932 
1933                         best_cpu = cpumask_first_and(later_mask,
1934                                                         sched_domain_span(sd));
1935                         /*
1936                          * Last chance: if a CPU that is in both later_mask
1937                          * and the current sd span exists, it becomes our
1938                          * choice. Of course, the latest possible CPU is
1939                          * already under consideration through later_mask.
1940                          */
1941                         if (best_cpu < nr_cpu_ids) {
1942                                 rcu_read_unlock();
1943                                 return best_cpu;
1944                         }
1945                 }
1946         }
1947         rcu_read_unlock();
1948 
1949         /*
1950          * At this point, all our guesses failed, we just return
1951          * 'something', and let the caller sort the things out.
1952          */
1953         if (this_cpu != -1)
1954                 return this_cpu;
1955 
1956         cpu = cpumask_any(later_mask);
1957         if (cpu < nr_cpu_ids)
1958                 return cpu;
1959 
1960         return -1;
1961 }
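
     /*
      * Order of preference applied above, with made-up numbers: say
      * cpudl_find() fills later_mask with CPUs {2, 5, 7}. We pick
      *  1) the task's last CPU, if it is in the mask (likely cache-hot);
      *  2) this CPU, if it is in the mask and shares a SD_WAKE_AFFINE
      *     domain with the task's CPU (preempting here beats migrating);
      *  3) otherwise the first mask CPU inside such a domain;
      *  4) else this CPU if it is in the mask, else any mask CPU, else -1.
      */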
1962 
1963 /* Locks the rq it finds */
1964 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1965 {
1966         struct rq *later_rq = NULL;
1967         int tries;
1968         int cpu;
1969 
1970         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1971                 cpu = find_later_rq(task);
1972 
1973                 if ((cpu == -1) || (cpu == rq->cpu))
1974                         break;
1975 
1976                 later_rq = cpu_rq(cpu);
1977 
1978                 if (later_rq->dl.dl_nr_running &&
1979                     !dl_time_before(task->dl.deadline,
1980                                         later_rq->dl.earliest_dl.curr)) {
1981                         /*
1982                          * Target rq has tasks of equal or earlier deadline,
1983                          * retrying does not release any lock and is unlikely
1984                          * to yield a different result.
1985                          */
1986                         later_rq = NULL;
1987                         break;
1988                 }
1989 
1990                 /* Retry if something changed. */
1991                 if (double_lock_balance(rq, later_rq)) {
1992                         if (unlikely(task_rq(task) != rq ||
1993                                      !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
1994                                      task_running(rq, task) ||
1995                                      !dl_task(task) ||
1996                                      !task_on_rq_queued(task))) {
1997                                 double_unlock_balance(rq, later_rq);
1998                                 later_rq = NULL;
1999                                 break;
2000                         }
2001                 }
2002 
2003                 /*
2004                  * If the rq we found has no -deadline task, or
2005                  * its earliest one has a later deadline than our
2006                  * task, the rq is a good one.
2007                  */
2008                 if (!later_rq->dl.dl_nr_running ||
2009                     dl_time_before(task->dl.deadline,
2010                                    later_rq->dl.earliest_dl.curr))
2011                         break;
2012 
2013                 /* Otherwise we try again. */
2014                 double_unlock_balance(rq, later_rq);
2015                 later_rq = NULL;
2016         }
2017 
2018         return later_rq;
2019 }
2020 
2021 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2022 {
2023         struct task_struct *p;
2024 
2025         if (!has_pushable_dl_tasks(rq))
2026                 return NULL;
2027 
2028         p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2029                      struct task_struct, pushable_dl_tasks);
2030 
2031         BUG_ON(rq->cpu != task_cpu(p));
2032         BUG_ON(task_current(rq, p));
2033         BUG_ON(p->nr_cpus_allowed <= 1);
2034 
2035         BUG_ON(!task_on_rq_queued(p));
2036         BUG_ON(!dl_task(p));
2037 
2038         return p;
2039 }
2040 
2041 /*
2042  * See if the non running -deadline tasks on this rq
2043  * can be sent to some other CPU where they can preempt
2044  * and start executing.
2045  */
2046 static int push_dl_task(struct rq *rq)
2047 {
2048         struct task_struct *next_task;
2049         struct rq *later_rq;
2050         int ret = 0;
2051 
2052         if (!rq->dl.overloaded)
2053                 return 0;
2054 
2055         next_task = pick_next_pushable_dl_task(rq);
2056         if (!next_task)
2057                 return 0;
2058 
2059 retry:
2060         if (WARN_ON(next_task == rq->curr))
2061                 return 0;
2062 
2063         /*
2064          * If next_task preempts rq->curr, and rq->curr
2065          * can move away, it makes sense to just reschedule
2066          * without going further in pushing next_task.
2067          */
2068         if (dl_task(rq->curr) &&
2069             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2070             rq->curr->nr_cpus_allowed > 1) {
2071                 resched_curr(rq);
2072                 return 0;
2073         }
2074 
2075         /* We might release rq lock */
2076         get_task_struct(next_task);
2077 
2078         /* Will lock the rq it'll find */
2079         later_rq = find_lock_later_rq(next_task, rq);
2080         if (!later_rq) {
2081                 struct task_struct *task;
2082 
2083                 /*
2084                  * We must check all this again, since
2085                  * find_lock_later_rq releases rq->lock and it is
2086                  * then possible that next_task has migrated.
2087                  */
2088                 task = pick_next_pushable_dl_task(rq);
2089                 if (task == next_task) {
2090                         /*
2091                          * The task is still there. We don't try
2092                          * again, some other CPU will pull it when ready.
2093                          */
2094                         goto out;
2095                 }
2096 
2097                 if (!task)
2098                         /* No more tasks */
2099                         goto out;
2100 
2101                 put_task_struct(next_task);
2102                 next_task = task;
2103                 goto retry;
2104         }
2105 
2106         deactivate_task(rq, next_task, 0);
2107         set_task_cpu(next_task, later_rq->cpu);
2108 
2109         /*
2110          * Update the later_rq clock here, because the clock is used
2111          * by the cpufreq_update_util() inside __add_running_bw().
2112          */
2113         update_rq_clock(later_rq);
2114         activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2115         ret = 1;
2116 
2117         resched_curr(later_rq);
2118 
2119         double_unlock_balance(rq, later_rq);
2120 
2121 out:
2122         put_task_struct(next_task);
2123 
2124         return ret;
2125 }
2126 
2127 static void push_dl_tasks(struct rq *rq)
2128 {
2129         /* push_dl_task() will return true if it moved a -deadline task */
2130         while (push_dl_task(rq))
2131                 ;
2132 }
2133 
2134 static void pull_dl_task(struct rq *this_rq)
2135 {
2136         int this_cpu = this_rq->cpu, cpu;
2137         struct task_struct *p;
2138         bool resched = false;
2139         struct rq *src_rq;
2140         u64 dmin = LONG_MAX;
2141 
2142         if (likely(!dl_overloaded(this_rq)))
2143                 return;
2144 
2145         /*
2146          * Match the barrier from dl_set_overload(); this guarantees that if we
2147          * see overloaded we must also see the dlo_mask bit.
2148          */
2149         smp_rmb();
2150 
2151         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2152                 if (this_cpu == cpu)
2153                         continue;
2154 
2155                 src_rq = cpu_rq(cpu);
2156 
2157                 /*
2158                  * It looks racy, and it is! However, as in sched_rt.c,
2159                  * we are fine with this.
2160                  */
2161                 if (this_rq->dl.dl_nr_running &&
2162                     dl_time_before(this_rq->dl.earliest_dl.curr,
2163                                    src_rq->dl.earliest_dl.next))
2164                         continue;
2165 
2166                 /* Might drop this_rq->lock */
2167                 double_lock_balance(this_rq, src_rq);
2168 
2169                 /*
2170                  * If there are no more pullable tasks on the
2171                  * rq, we're done with it.
2172                  */
2173                 if (src_rq->dl.dl_nr_running <= 1)
2174                         goto skip;
2175 
2176                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2177 
2178                 /*
2179                  * We found a task to be pulled if:
2180                  *  - it preempts our current (if there's one),
2181                  *  - it will preempt the last one we pulled (if any).
2182                  */
2183                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2184                     (!this_rq->dl.dl_nr_running ||
2185                      dl_time_before(p->dl.deadline,
2186                                     this_rq->dl.earliest_dl.curr))) {
2187                         WARN_ON(p == src_rq->curr);
2188                         WARN_ON(!task_on_rq_queued(p));
2189 
2190                         /*
2191                          * Then we pull iff p's deadline is not earlier than
2192                          * that of the current task of its runqueue.
2193                          */
2194                         if (dl_time_before(p->dl.deadline,
2195                                            src_rq->curr->dl.deadline))
2196                                 goto skip;
2197 
2198                         resched = true;
2199 
2200                         deactivate_task(src_rq, p, 0);
2201                         set_task_cpu(p, this_cpu);
2202                         activate_task(this_rq, p, 0);
2203                         dmin = p->dl.deadline;
2204 
2205                         /* Is there any other task even earlier? */
2206                 }
2207 skip:
2208                 double_unlock_balance(this_rq, src_rq);
2209         }
2210 
2211         if (resched)
2212                 resched_curr(this_rq);
2213 }
2214 
2215 /*
2216  * Since the task is not running and a reschedule is not going to happen
2217  * anytime soon on its runqueue, we try pushing it away now.
2218  */
2219 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2220 {
2221         if (!task_running(rq, p) &&
2222             !test_tsk_need_resched(rq->curr) &&
2223             p->nr_cpus_allowed > 1 &&
2224             dl_task(rq->curr) &&
2225             (rq->curr->nr_cpus_allowed < 2 ||
2226              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2227                 push_dl_tasks(rq);
2228         }
2229 }
2230 
2231 static void set_cpus_allowed_dl(struct task_struct *p,
2232                                 const struct cpumask *new_mask)
2233 {
2234         struct root_domain *src_rd;
2235         struct rq *rq;
2236 
2237         BUG_ON(!dl_task(p));
2238 
2239         rq = task_rq(p);
2240         src_rd = rq->rd;
2241         /*
2242          * Migrating a SCHED_DEADLINE task between exclusive
2243          * cpusets (different root_domains) entails a bandwidth
2244          * update. We already made space for us in the destination
2245          * domain (see cpuset_can_attach()).
2246          */
2247         if (!cpumask_intersects(src_rd->span, new_mask)) {
2248                 struct dl_bw *src_dl_b;
2249 
2250                 src_dl_b = dl_bw_of(cpu_of(rq));
2251                 /*
2252                  * We now free resources of the root_domain we are migrating
2253                  * off. In the worst case, sched_setattr() may temporarily fail
2254                  * until we complete the update.
2255                  */
2256                 raw_spin_lock(&src_dl_b->lock);
2257                 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2258                 raw_spin_unlock(&src_dl_b->lock);
2259         }
2260 
2261         set_cpus_allowed_common(p, new_mask);
2262 }
2263 
2264 /* Assumes rq->lock is held */
2265 static void rq_online_dl(struct rq *rq)
2266 {
2267         if (rq->dl.overloaded)
2268                 dl_set_overload(rq);
2269 
2270         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2271         if (rq->dl.dl_nr_running > 0)
2272                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2273 }
2274 
2275 /* Assumes rq->lock is held */
2276 static void rq_offline_dl(struct rq *rq)
2277 {
2278         if (rq->dl.overloaded)
2279                 dl_clear_overload(rq);
2280 
2281         cpudl_clear(&rq->rd->cpudl, rq->cpu);
2282         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2283 }
2284 
2285 void __init init_sched_dl_class(void)
2286 {
2287         unsigned int i;
2288 
2289         for_each_possible_cpu(i)
2290                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2291                                         GFP_KERNEL, cpu_to_node(i));
2292 }
2293 
2294 void dl_add_task_root_domain(struct task_struct *p)
2295 {
2296         struct rq_flags rf;
2297         struct rq *rq;
2298         struct dl_bw *dl_b;
2299 
2300         rq = task_rq_lock(p, &rf);
2301         if (!dl_task(p))
2302                 goto unlock;
2303 
2304         dl_b = &rq->rd->dl_bw;
2305         raw_spin_lock(&dl_b->lock);
2306 
2307         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2308 
2309         raw_spin_unlock(&dl_b->lock);
2310 
2311 unlock:
2312         task_rq_unlock(rq, p, &rf);
2313 }
2314 
2315 void dl_clear_root_domain(struct root_domain *rd)
2316 {
2317         unsigned long flags;
2318 
2319         raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2320         rd->dl_bw.total_bw = 0;
2321         raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2322 }
2323 
2324 #endif /* CONFIG_SMP */
2325 
2326 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2327 {
2328         /*
2329          * task_non_contending() can start the "inactive timer" (if the 0-lag
2330          * time is in the future). If the task switches back to dl before
2331          * the "inactive timer" fires, it can continue to consume its current
2332          * runtime using its current deadline. If it stays outside of
2333          * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2334          * will reset the task parameters.
2335          */
2336         if (task_on_rq_queued(p) && p->dl.dl_runtime)
2337                 task_non_contending(p);
2338 
2339         if (!task_on_rq_queued(p)) {
2340                 /*
2341                  * Inactive timer is armed. However, p is leaving DEADLINE and
2342                  * might migrate away from this rq while continuing to run on
2343                  * some other class. We need to remove its contribution from
2344                  * this rq running_bw now, or sub_rq_bw (below) will complain.
2345                  */
2346                 if (p->dl.dl_non_contending)
2347                         sub_running_bw(&p->dl, &rq->dl);
2348                 sub_rq_bw(&p->dl, &rq->dl);
2349         }
2350 
2351         /*
2352          * We cannot use inactive_task_timer() to invoke sub_running_bw()
2353          * at the 0-lag time, because the task could have been migrated
2354          * away in the meanwhile, while it was SCHED_OTHER.
2355          */
2356         if (p->dl.dl_non_contending)
2357                 p->dl.dl_non_contending = 0;
2358 
2359         /*
2360          * Since this might be the only -deadline task on the rq,
2361          * this is the right place to try to pull some other one
2362          * from an overloaded CPU, if any.
2363          */
2364         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2365                 return;
2366 
2367         deadline_queue_pull_task(rq);
2368 }
2369 
2370 /*
2371  * When switching to -deadline, we may overload the rq, then
2372  * we try to push someone off, if possible.
2373  */
2374 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2375 {
2376         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2377                 put_task_struct(p);
2378 
2379         /* If p is not queued we will update its parameters at next wakeup. */
2380         if (!task_on_rq_queued(p)) {
2381                 add_rq_bw(&p->dl, &rq->dl);
2382 
2383                 return;
2384         }
2385 
2386         if (rq->curr != p) {
2387 #ifdef CONFIG_SMP
2388                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2389                         deadline_queue_push_tasks(rq);
2390 #endif
2391                 if (dl_task(rq->curr))
2392                         check_preempt_curr_dl(rq, p, 0);
2393                 else
2394                         resched_curr(rq);
2395         }
2396 }
2397 
2398 /*
2399  * If the scheduling parameters of a -deadline task changed,
2400  * a push or pull operation might be needed.
2401  */
2402 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2403                             int oldprio)
2404 {
2405         if (task_on_rq_queued(p) || rq->curr == p) {
2406 #ifdef CONFIG_SMP
2407                 /*
2408                  * This might be too much, but unfortunately
2409                  * we don't have the old deadline value, and
2410                  * we can't tell whether the task is increasing
2411                  * or lowering its prio, so...
2412                  */
2413                 if (!rq->dl.overloaded)
2414                         deadline_queue_pull_task(rq);
2415 
2416                 /*
2417                  * If we now have an earlier deadline task than p,
2418                  * then reschedule, provided p is still on this
2419                  * runqueue.
2420                  */
2421                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2422                         resched_curr(rq);
2423 #else
2424                 /*
2425                  * Again, we don't know if p has an earlier
2426                  * or later deadline, so let's blindly set a
2427                  * (maybe not needed) rescheduling point.
2428                  */
2429                 resched_curr(rq);
2430 #endif /* CONFIG_SMP */
2431         }
2432 }
2433 
2434 const struct sched_class dl_sched_class = {
2435         .next                   = &rt_sched_class,
2436         .enqueue_task           = enqueue_task_dl,
2437         .dequeue_task           = dequeue_task_dl,
2438         .yield_task             = yield_task_dl,
2439 
2440         .check_preempt_curr     = check_preempt_curr_dl,
2441 
2442         .pick_next_task         = pick_next_task_dl,
2443         .put_prev_task          = put_prev_task_dl,
2444         .set_next_task          = set_next_task_dl,
2445 
2446 #ifdef CONFIG_SMP
2447         .balance                = balance_dl,
2448         .select_task_rq         = select_task_rq_dl,
2449         .migrate_task_rq        = migrate_task_rq_dl,
2450         .set_cpus_allowed       = set_cpus_allowed_dl,
2451         .rq_online              = rq_online_dl,
2452         .rq_offline             = rq_offline_dl,
2453         .task_woken             = task_woken_dl,
2454 #endif
2455 
2456         .task_tick              = task_tick_dl,
2457         .task_fork              = task_fork_dl,
2458 
2459         .prio_changed           = prio_changed_dl,
2460         .switched_from          = switched_from_dl,
2461         .switched_to            = switched_to_dl,
2462 
2463         .update_curr            = update_curr_dl,
2464 };
2465 
2466 int sched_dl_global_validate(void)
2467 {
2468         u64 runtime = global_rt_runtime();
2469         u64 period = global_rt_period();
2470         u64 new_bw = to_ratio(period, runtime);
2471         struct dl_bw *dl_b;
2472         int cpu, ret = 0;
2473         unsigned long flags;
2474 
2475         /*
2476          * Here we want to check that the bandwidth is not being set to some
2477          * value smaller than the currently allocated bandwidth in
2478          * any of the root_domains.
2479          *
2480          * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
2481          * cycling on root_domains... Discussion on different/better
2482          * solutions is welcome!
2483          */
2484         for_each_possible_cpu(cpu) {
2485                 rcu_read_lock_sched();
2486                 dl_b = dl_bw_of(cpu);
2487 
2488                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2489                 if (new_bw < dl_b->total_bw)
2490                         ret = -EBUSY;
2491                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2492 
2493                 rcu_read_unlock_sched();
2494 
2495                 if (ret)
2496                         break;
2497         }
2498 
2499         return ret;
2500 }
2501 
2502 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2503 {
2504         if (global_rt_runtime() == RUNTIME_INF) {
2505                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2506                 dl_rq->extra_bw = 1 << BW_SHIFT;
2507         } else {
2508                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2509                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2510                 dl_rq->extra_bw = to_ratio(global_rt_period(),
2511                                                     global_rt_runtime());
2512         }
2513 }
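
     /*
      * Worked example with the default sysctls (sched_rt_runtime_us =
      * 950000, sched_rt_period_us = 1000000), assuming BW_SHIFT = 20,
      * RATIO_SHIFT = 8 and to_ratio(period, runtime) being the fixed-point
      * division (runtime << BW_SHIFT) / period:
      *
      *     extra_bw = to_ratio(1000000, 950000) ~= 996147   (0.95 << 20)
      *     bw_ratio = to_ratio(950000, 1000000) >> 12 ~= 269 (~1.05 << 8)
      *
      * i.e. GRUB reclaiming is bounded by the 95% RT bandwidth and consumed
      * runtime is scaled by roughly 1/0.95.
      */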
2514 
2515 void sched_dl_do_global(void)
2516 {
2517         u64 new_bw = -1;
2518         struct dl_bw *dl_b;
2519         int cpu;
2520         unsigned long flags;
2521 
2522         def_dl_bandwidth.dl_period = global_rt_period();
2523         def_dl_bandwidth.dl_runtime = global_rt_runtime();
2524 
2525         if (global_rt_runtime() != RUNTIME_INF)
2526                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2527 
2528         /*
2529          * FIXME: As above...
2530          */
2531         for_each_possible_cpu(cpu) {
2532                 rcu_read_lock_sched();
2533                 dl_b = dl_bw_of(cpu);
2534 
2535                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2536                 dl_b->bw = new_bw;
2537                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2538 
2539                 rcu_read_unlock_sched();
2540                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2541         }
2542 }
2543 
2544 /*
2545  * We must be sure that accepting a new task (or allowing changing the
2546  * parameters of an existing one) is consistent with the bandwidth
2547  * constraints. If yes, this function also accordingly updates the currently
2548  * allocated bandwidth to reflect the new situation.
2549  *
2550  * This function is called while holding p's rq->lock.
2551  */
2552 int sched_dl_overflow(struct task_struct *p, int policy,
2553                       const struct sched_attr *attr)
2554 {
2555         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2556         u64 period = attr->sched_period ?: attr->sched_deadline;
2557         u64 runtime = attr->sched_runtime;
2558         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2559         int cpus, err = -1;
2560 
2561         if (attr->sched_flags & SCHED_FLAG_SUGOV)
2562                 return 0;
2563 
2564         /* !deadline task may carry old deadline bandwidth */
2565         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2566                 return 0;
2567 
2568         /*
2569  * Whether a task enters, leaves, or stays -deadline while changing
2570  * its parameters, we may need to update the total allocated
2571  * bandwidth of the container accordingly.
2572          */
2573         raw_spin_lock(&dl_b->lock);
2574         cpus = dl_bw_cpus(task_cpu(p));
2575         if (dl_policy(policy) && !task_has_dl_policy(p) &&
2576             !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2577                 if (hrtimer_active(&p->dl.inactive_timer))
2578                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
2579                 __dl_add(dl_b, new_bw, cpus);
2580                 err = 0;
2581         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2582                    !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2583                 /*
2584                  * XXX this is slightly incorrect: when the task
2585                  * utilization decreases, we should delay the total
2586                  * utilization change until the task's 0-lag point.
2587                  * But this would require to set the task's "inactive
2588                  * timer" when the task is not inactive.
2589                  */
2590                 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2591                 __dl_add(dl_b, new_bw, cpus);
2592                 dl_change_utilization(p, new_bw);
2593                 err = 0;
2594         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2595                 /*
2596                  * Do not decrease the total deadline utilization here,
2597                  * switched_from_dl() will take care to do it at the correct
2598                  * (0-lag) time.
2599                  */
2600                 err = 0;
2601         }
2602         raw_spin_unlock(&dl_b->lock);
2603 
2604         return err;
2605 }
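
     /*
      * A made-up admission example for the checks above, assuming the usual
      * __dl_overflow(dl_b, cpus, old, new) test
      * "dl_b->bw * cpus < dl_b->total_bw - old + new": on a 4-CPU root
      * domain with bw = 0.95 per CPU and total_bw = 3.0 already allocated,
      * a new task asking for 30ms every 100ms (new_bw = 0.3) is admitted,
      * since 3.0 + 0.3 <= 0.95 * 4 = 3.8, while one asking for 90ms every
      * 100ms (0.9) is rejected and the caller returns -EBUSY.
      */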
2606 
2607 /*
2608  * This function initializes the sched_dl_entity of a newly becoming
2609  * SCHED_DEADLINE task.
2610  *
2611  * Only the static values are considered here, the actual runtime and the
2612  * absolute deadline will be properly calculated when the task is enqueued
2613  * for the first time with its new policy.
2614  */
2615 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2616 {
2617         struct sched_dl_entity *dl_se = &p->dl;
2618 
2619         dl_se->dl_runtime = attr->sched_runtime;
2620         dl_se->dl_deadline = attr->sched_deadline;
2621         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2622         dl_se->flags = attr->sched_flags;
2623         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2624         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2625 }
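
     /*
      * Example values for the two ratios above (illustrative only), with
      * sched_runtime = 10ms, sched_deadline = 40ms, sched_period = 100ms
      * and to_ratio() being a << BW_SHIFT (2^20) fixed-point division:
      *
      *     dl_bw      = 0.10 << 20 ~= 104857     (runtime / period)
      *     dl_density = 0.25 << 20  = 262144     (runtime / deadline)
      *
      * dl_bw drives admission control and the bandwidth tracking above,
      * dl_density the revised wakeup rule for constrained tasks (see
      * update_dl_revised_wakeup()).
      */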
2626 
2627 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2628 {
2629         struct sched_dl_entity *dl_se = &p->dl;
2630 
2631         attr->sched_priority = p->rt_priority;
2632         attr->sched_runtime = dl_se->dl_runtime;
2633         attr->sched_deadline = dl_se->dl_deadline;
2634         attr->sched_period = dl_se->dl_period;
2635         attr->sched_flags = dl_se->flags;
2636 }
2637 
2638 /*
2639  * This function validates the new parameters of a -deadline task.
2640  * We ask for the deadline to be non-zero and greater than or equal
2641  * to the runtime, and for the period to be either zero or greater
2642  * than or equal to the deadline. Furthermore, we have to be sure that
2643  * user parameters are above the internal resolution of 1us (we
2644  * check sched_runtime only since it is always the smaller one) and
2645  * below 2^63 ns (we have to check both sched_deadline and
2646  * sched_period, as the latter can be zero).
2647  */
2648 bool __checkparam_dl(const struct sched_attr *attr)
2649 {
2650         /* special dl tasks don't actually use any parameter */
2651         if (attr->sched_flags & SCHED_FLAG_SUGOV)
2652                 return true;
2653 
2654         /* deadline != 0 */
2655         if (attr->sched_deadline == 0)
2656                 return false;
2657 
2658         /*
2659          * Since we truncate DL_SCALE bits, make sure we're at least
2660          * that big.
2661          */
2662         if (attr->sched_runtime < (1ULL << DL_SCALE))
2663                 return false;
2664 
2665         /*
2666          * Since we use the MSB for wrap-around and sign issues, make
2667          * sure it's not set (mind that period can be equal to zero).
2668          */
2669         if (attr->sched_deadline & (1ULL << 63) ||
2670             attr->sched_period & (1ULL << 63))
2671                 return false;
2672 
2673         /* runtime <= deadline <= period (if period != 0) */
2674         if ((attr->sched_period != 0 &&
2675              attr->sched_period < attr->sched_deadline) ||
2676             attr->sched_deadline < attr->sched_runtime)
2677                 return false;
2678 
2679         return true;
2680 }
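
     /*
      * For reference, a parameter set that passes all of the above, shown
      * as a (hypothetical) userspace snippet; there is no glibc wrapper,
      * so the sched_setattr(2) syscall is used directly:
      *
      *     struct sched_attr attr = {
      *             .size           = sizeof(attr),
      *             .sched_policy   = SCHED_DEADLINE,
      *             .sched_runtime  =  10 * 1000 * 1000,    //  10ms
      *             .sched_deadline =  30 * 1000 * 1000,    //  30ms
      *             .sched_period   = 100 * 1000 * 1000,    // 100ms
      *     };
      *     syscall(SYS_sched_setattr, 0, &attr, 0);
      *
      * runtime <= deadline <= period, everything >= 1us and below 2^63 ns,
      * so __checkparam_dl() returns true.
      */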
2681 
2682 /*
2683  * This function clears the sched_dl_entity static params.
2684  */
2685 void __dl_clear_params(struct task_struct *p)
2686 {
2687         struct sched_dl_entity *dl_se = &p->dl;
2688 
2689         dl_se->dl_runtime               = 0;
2690         dl_se->dl_deadline              = 0;
2691         dl_se->dl_period                = 0;
2692         dl_se->flags                    = 0;
2693         dl_se->dl_bw                    = 0;
2694         dl_se->dl_density               = 0;
2695 
2696         dl_se->dl_throttled             = 0;
2697         dl_se->dl_yielded               = 0;
2698         dl_se->dl_non_contending        = 0;
2699         dl_se->dl_overrun               = 0;
2700 }
2701 
2702 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2703 {
2704         struct sched_dl_entity *dl_se = &p->dl;
2705 
2706         if (dl_se->dl_runtime != attr->sched_runtime ||
2707             dl_se->dl_deadline != attr->sched_deadline ||
2708             dl_se->dl_period != attr->sched_period ||
2709             dl_se->flags != attr->sched_flags)
2710                 return true;
2711 
2712         return false;
2713 }
2714 
2715 #ifdef CONFIG_SMP
2716 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2717 {
2718         unsigned int dest_cpu;
2719         struct dl_bw *dl_b;
2720         bool overflow;
2721         int cpus, ret;
2722         unsigned long flags;
2723 
2724         dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2725 
2726         rcu_read_lock_sched();
2727         dl_b = dl_bw_of(dest_cpu);
2728         raw_spin_lock_irqsave(&dl_b->lock, flags);
2729         cpus = dl_bw_cpus(dest_cpu);
2730         overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2731         if (overflow) {
2732                 ret = -EBUSY;
2733         } else {
2734                 /*
2735                  * We reserve space for this task in the destination
2736                  * root_domain, as we can't fail after this point.
2737                  * We will free resources in the source root_domain
2738                  * later on (see set_cpus_allowed_dl()).
2739                  */
2740                 __dl_add(dl_b, p->dl.dl_bw, cpus);
2741                 ret = 0;
2742         }
2743         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2744         rcu_read_unlock_sched();
2745 
2746         return ret;
2747 }
2748 
2749 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2750                                  const struct cpumask *trial)
2751 {
2752         int ret = 1, trial_cpus;
2753         struct dl_bw *cur_dl_b;
2754         unsigned long flags;
2755 
2756         rcu_read_lock_sched();
2757         cur_dl_b = dl_bw_of(cpumask_any(cur));
2758         trial_cpus = cpumask_weight(trial);
2759 
2760         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2761         if (cur_dl_b->bw != -1 &&
2762             cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2763                 ret = 0;
2764         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2765         rcu_read_unlock_sched();
2766 
2767         return ret;
2768 }
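
     /*
      * Example of the check above (illustrative numbers): shrinking a
      * cpuset from 4 to 2 CPUs with bw = 0.95 per CPU and total_bw = 2.2
      * already allocated is refused (0.95 * 2 = 1.9 < 2.2); with
      * total_bw = 1.5 the shrink would be allowed.
      */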
2769 
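     /*
      * Used, e.g., from the CPU hotplug path: with no bandwidth being added
      * or removed, __dl_overflow(dl_b, cpus, 0, 0) simply asks whether the
      * remaining active CPUs still fit the already allocated total_bw. For
      * example (illustrative numbers), offlining one of two CPUs with
      * bw = 0.95 and total_bw = 1.2 reports the CPU as busy, since
      * 0.95 * 1 < 1.2.
      */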
2770 bool dl_cpu_busy(unsigned int cpu)
2771 {
2772         unsigned long flags;
2773         struct dl_bw *dl_b;
2774         bool overflow;
2775         int cpus;
2776 
2777         rcu_read_lock_sched();
2778         dl_b = dl_bw_of(cpu);
2779         raw_spin_lock_irqsave(&dl_b->lock, flags);
2780         cpus = dl_bw_cpus(cpu);
2781         overflow = __dl_overflow(dl_b, cpus, 0, 0);
2782         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2783         rcu_read_unlock_sched();
2784 
2785         return overflow;
2786 }
2787 #endif
2788 
2789 #ifdef CONFIG_SCHED_DEBUG
2790 void print_dl_stats(struct seq_file *m, int cpu)
2791 {
2792         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2793 }
2794 #endif /* CONFIG_SCHED_DEBUG */
