This source file includes the following definitions:
- dl_task_of
- rq_of_dl_rq
- dl_rq_of_se
- on_dl_rq
- dl_bw_of
- dl_bw_cpus
- dl_bw_of
- dl_bw_cpus
- __add_running_bw
- __sub_running_bw
- __add_rq_bw
- __sub_rq_bw
- add_rq_bw
- sub_rq_bw
- add_running_bw
- sub_running_bw
- dl_change_utilization
- task_non_contending
- task_contending
- is_leftmost
- init_dl_bandwidth
- init_dl_bw
- init_dl_rq
- dl_overloaded
- dl_set_overload
- dl_clear_overload
- update_dl_migration
- inc_dl_migration
- dec_dl_migration
- enqueue_pushable_dl_task
- dequeue_pushable_dl_task
- has_pushable_dl_tasks
- need_pull_dl_task
- deadline_queue_push_tasks
- deadline_queue_pull_task
- dl_task_offline_migration
- enqueue_pushable_dl_task
- dequeue_pushable_dl_task
- inc_dl_migration
- dec_dl_migration
- need_pull_dl_task
- pull_dl_task
- deadline_queue_push_tasks
- deadline_queue_pull_task
- setup_new_dl_entity
- replenish_dl_entity
- dl_entity_overflow
- update_dl_revised_wakeup
- dl_is_implicit
- update_dl_entity
- dl_next_period
- start_dl_timer
- dl_task_timer
- init_dl_task_timer
- dl_check_constrained_dl
- dl_runtime_exceeded
- grub_reclaim
- update_curr_dl
- inactive_task_timer
- init_dl_inactive_task_timer
- inc_dl_deadline
- dec_dl_deadline
- inc_dl_deadline
- dec_dl_deadline
- inc_dl_tasks
- dec_dl_tasks
- __enqueue_dl_entity
- __dequeue_dl_entity
- enqueue_dl_entity
- dequeue_dl_entity
- enqueue_task_dl
- __dequeue_task_dl
- dequeue_task_dl
- yield_task_dl
- select_task_rq_dl
- migrate_task_rq_dl
- check_preempt_equal_dl
- balance_dl
- check_preempt_curr_dl
- start_hrtick_dl
- start_hrtick_dl
- set_next_task_dl
- pick_next_dl_entity
- pick_next_task_dl
- put_prev_task_dl
- task_tick_dl
- task_fork_dl
- pick_dl_task
- pick_earliest_pushable_dl_task
- find_later_rq
- find_lock_later_rq
- pick_next_pushable_dl_task
- push_dl_task
- push_dl_tasks
- pull_dl_task
- task_woken_dl
- set_cpus_allowed_dl
- rq_online_dl
- rq_offline_dl
- init_sched_dl_class
- dl_add_task_root_domain
- dl_clear_root_domain
- switched_from_dl
- switched_to_dl
- prio_changed_dl
- sched_dl_global_validate
- init_dl_rq_bw_ratio
- sched_dl_do_global
- sched_dl_overflow
- __setparam_dl
- __getparam_dl
- __checkparam_dl
- __dl_clear_params
- dl_param_changed
- dl_task_can_attach
- dl_cpuset_cpumask_can_shrink
- dl_cpu_busy
- print_dl_stats
   1 
   2 
   3 
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 
  12 
  13 
  14 
  15 
  16 
  17 
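      /*
       * Deadline Scheduling Class (SCHED_DEADLINE)
       *
       * Earliest Deadline First (EDF) scheduling of tasks with a runtime
       * reservation, enforced by the Constant Bandwidth Server (CBS).
       */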
  18 #include "sched.h"
  19 #include "pelt.h"
  20 
  21 struct dl_bandwidth def_dl_bandwidth;
  22 
  23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
  24 {
  25         return container_of(dl_se, struct task_struct, dl);
  26 }
  27 
  28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
  29 {
  30         return container_of(dl_rq, struct rq, dl);
  31 }
  32 
  33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
  34 {
  35         struct task_struct *p = dl_task_of(dl_se);
  36         struct rq *rq = task_rq(p);
  37 
  38         return &rq->dl;
  39 }
  40 
  41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
  42 {
  43         return !RB_EMPTY_NODE(&dl_se->rb_node);
  44 }
  45 
  46 #ifdef CONFIG_SMP
  47 static inline struct dl_bw *dl_bw_of(int i)
  48 {
  49         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  50                          "sched RCU must be held");
  51         return &cpu_rq(i)->rd->dl_bw;
  52 }
  53 
  54 static inline int dl_bw_cpus(int i)
  55 {
  56         struct root_domain *rd = cpu_rq(i)->rd;
  57         int cpus = 0;
  58 
  59         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
  60                          "sched RCU must be held");
  61         for_each_cpu_and(i, rd->span, cpu_active_mask)
  62                 cpus++;
  63 
  64         return cpus;
  65 }
  66 #else
  67 static inline struct dl_bw *dl_bw_of(int i)
  68 {
  69         return &cpu_rq(i)->dl.dl_bw;
  70 }
  71 
  72 static inline int dl_bw_cpus(int i)
  73 {
  74         return 1;
  75 }
  76 #endif
  77 
  78 static inline
  79 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  80 {
  81         u64 old = dl_rq->running_bw;
  82 
  83         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  84         dl_rq->running_bw += dl_bw;
  85         SCHED_WARN_ON(dl_rq->running_bw < old); 
  86         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
  87         
  88         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
  89 }
  90 
  91 static inline
  92 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
  93 {
  94         u64 old = dl_rq->running_bw;
  95 
  96         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
  97         dl_rq->running_bw -= dl_bw;
  98         SCHED_WARN_ON(dl_rq->running_bw > old); 
  99         if (dl_rq->running_bw > old)
 100                 dl_rq->running_bw = 0;
 101         
 102         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 103 }
 104 
 105 static inline
 106 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 107 {
 108         u64 old = dl_rq->this_bw;
 109 
 110         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 111         dl_rq->this_bw += dl_bw;
 112         SCHED_WARN_ON(dl_rq->this_bw < old); 
 113 }
 114 
 115 static inline
 116 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 117 {
 118         u64 old = dl_rq->this_bw;
 119 
 120         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
 121         dl_rq->this_bw -= dl_bw;
 122         SCHED_WARN_ON(dl_rq->this_bw > old); 
 123         if (dl_rq->this_bw > old)
 124                 dl_rq->this_bw = 0;
 125         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
 126 }
 127 
 128 static inline
 129 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 130 {
 131         if (!dl_entity_is_special(dl_se))
 132                 __add_rq_bw(dl_se->dl_bw, dl_rq);
 133 }
 134 
 135 static inline
 136 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 137 {
 138         if (!dl_entity_is_special(dl_se))
 139                 __sub_rq_bw(dl_se->dl_bw, dl_rq);
 140 }
 141 
 142 static inline
 143 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 144 {
 145         if (!dl_entity_is_special(dl_se))
 146                 __add_running_bw(dl_se->dl_bw, dl_rq);
 147 }
 148 
 149 static inline
 150 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 151 {
 152         if (!dl_entity_is_special(dl_se))
 153                 __sub_running_bw(dl_se->dl_bw, dl_rq);
 154 }
 155 
 156 void dl_change_utilization(struct task_struct *p, u64 new_bw)
 157 {
 158         struct rq *rq;
 159 
 160         BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
 161 
 162         if (task_on_rq_queued(p))
 163                 return;
 164 
 165         rq = task_rq(p);
 166         if (p->dl.dl_non_contending) {
 167                 sub_running_bw(&p->dl, &rq->dl);
 168                 p->dl.dl_non_contending = 0;
 169                 
 170 
 171 
 172 
 173 
 174 
 175 
 176                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 177                         put_task_struct(p);
 178         }
 179         __sub_rq_bw(p->dl.dl_bw, &rq->dl);
 180         __add_rq_bw(new_bw, &rq->dl);
 181 }
 182 
 183 
 184 
 185 
 186 
 187 
 188 
 189 
 190 
 191 
 192 
 193 
 194 
 195 
 196 
 197 
 198 
 199 
 200 
 201 
 202 
 203 
 204 
 205 
 206 
 207 
 208 
 209 
 210 
 211 
 212 
 213 
 214 
 215 
 216 
 217 
 218 
 219 
 220 
 221 
 222 
 223 
 224 
 225 
 226 
 227 
 228 
 229 
 230 
 231 
 232 
 233 
 234 
 235 
 236 
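      /*
       * The task is blocking or leaving the -deadline class: it stops
       * contending for the CPU.  Following the CBS/GRUB rules its bandwidth
       * cannot be removed from running_bw right away, but only at the
       * "0-lag time", i.e. the instant at which the remaining runtime,
       * consumed at the reserved rate, would have brought the task's lag to
       * zero.  If that instant is already in the past the bandwidth is
       * removed here; otherwise the inactive timer is armed to remove it
       * later.
       */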
 237 static void task_non_contending(struct task_struct *p)
 238 {
 239         struct sched_dl_entity *dl_se = &p->dl;
 240         struct hrtimer *timer = &dl_se->inactive_timer;
 241         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 242         struct rq *rq = rq_of_dl_rq(dl_rq);
 243         s64 zerolag_time;
 244 
 245         
 246 
 247 
 248 
 249         if (dl_se->dl_runtime == 0)
 250                 return;
 251 
 252         if (dl_entity_is_special(dl_se))
 253                 return;
 254 
 255         WARN_ON(dl_se->dl_non_contending);
 256 
 257         zerolag_time = dl_se->deadline -
 258                  div64_long((dl_se->runtime * dl_se->dl_period),
 259                         dl_se->dl_runtime);
 260 
 261         
 262 
 263 
 264 
 265         zerolag_time -= rq_clock(rq);
 266 
 267         
 268 
 269 
 270 
 271         if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 272                 if (dl_task(p))
 273                         sub_running_bw(dl_se, dl_rq);
 274                 if (!dl_task(p) || p->state == TASK_DEAD) {
 275                         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 276 
 277                         if (p->state == TASK_DEAD)
 278                                 sub_rq_bw(&p->dl, &rq->dl);
 279                         raw_spin_lock(&dl_b->lock);
 280                         __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
 281                         __dl_clear_params(p);
 282                         raw_spin_unlock(&dl_b->lock);
 283                 }
 284 
 285                 return;
 286         }
 287 
 288         dl_se->dl_non_contending = 1;
 289         get_task_struct(p);
 290         hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
 291 }
 292 
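      /*
       * The task starts contending for the CPU again (wakeup or incoming
       * migration).  If the 0-lag time has not passed yet, running_bw still
       * includes the task and cancelling the inactive timer is enough;
       * otherwise its bandwidth must be added back to running_bw.
       */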
 293 static void task_contending(struct sched_dl_entity *dl_se, int flags)
 294 {
 295         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 296 
 297         
 298 
 299 
 300 
 301         if (dl_se->dl_runtime == 0)
 302                 return;
 303 
 304         if (flags & ENQUEUE_MIGRATED)
 305                 add_rq_bw(dl_se, dl_rq);
 306 
 307         if (dl_se->dl_non_contending) {
 308                 dl_se->dl_non_contending = 0;
 309                 
 310 
 311 
 312 
 313 
 314 
 315 
 316                 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
 317                         put_task_struct(dl_task_of(dl_se));
 318         } else {
 319                 
 320 
 321 
 322 
 323 
 324 
 325 
 326                 add_running_bw(dl_se, dl_rq);
 327         }
 328 }
 329 
 330 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 331 {
 332         struct sched_dl_entity *dl_se = &p->dl;
 333 
 334         return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 335 }
 336 
 337 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
 338 {
 339         raw_spin_lock_init(&dl_b->dl_runtime_lock);
 340         dl_b->dl_period = period;
 341         dl_b->dl_runtime = runtime;
 342 }
 343 
 344 void init_dl_bw(struct dl_bw *dl_b)
 345 {
 346         raw_spin_lock_init(&dl_b->lock);
 347         raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
 348         if (global_rt_runtime() == RUNTIME_INF)
 349                 dl_b->bw = -1;
 350         else
 351                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
 352         raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
 353         dl_b->total_bw = 0;
 354 }
 355 
 356 void init_dl_rq(struct dl_rq *dl_rq)
 357 {
 358         dl_rq->root = RB_ROOT_CACHED;
 359 
 360 #ifdef CONFIG_SMP
 361         
 362         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
 363 
 364         dl_rq->dl_nr_migratory = 0;
 365         dl_rq->overloaded = 0;
 366         dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 367 #else
 368         init_dl_bw(&dl_rq->dl_bw);
 369 #endif
 370 
 371         dl_rq->running_bw = 0;
 372         dl_rq->this_bw = 0;
 373         init_dl_rq_bw_ratio(dl_rq);
 374 }
 375 
 376 #ifdef CONFIG_SMP
 377 
 378 static inline int dl_overloaded(struct rq *rq)
 379 {
 380         return atomic_read(&rq->rd->dlo_count);
 381 }
 382 
 383 static inline void dl_set_overload(struct rq *rq)
 384 {
 385         if (!rq->online)
 386                 return;
 387 
 388         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
 389         
 390 
 391 
 392 
 393 
 394 
 395         smp_wmb();
 396         atomic_inc(&rq->rd->dlo_count);
 397 }
 398 
 399 static inline void dl_clear_overload(struct rq *rq)
 400 {
 401         if (!rq->online)
 402                 return;
 403 
 404         atomic_dec(&rq->rd->dlo_count);
 405         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
 406 }
 407 
 408 static void update_dl_migration(struct dl_rq *dl_rq)
 409 {
 410         if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
 411                 if (!dl_rq->overloaded) {
 412                         dl_set_overload(rq_of_dl_rq(dl_rq));
 413                         dl_rq->overloaded = 1;
 414                 }
 415         } else if (dl_rq->overloaded) {
 416                 dl_clear_overload(rq_of_dl_rq(dl_rq));
 417                 dl_rq->overloaded = 0;
 418         }
 419 }
 420 
 421 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 422 {
 423         struct task_struct *p = dl_task_of(dl_se);
 424 
 425         if (p->nr_cpus_allowed > 1)
 426                 dl_rq->dl_nr_migratory++;
 427 
 428         update_dl_migration(dl_rq);
 429 }
 430 
 431 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 432 {
 433         struct task_struct *p = dl_task_of(dl_se);
 434 
 435         if (p->nr_cpus_allowed > 1)
 436                 dl_rq->dl_nr_migratory--;
 437 
 438         update_dl_migration(dl_rq);
 439 }
 440 
 441 
 442 
 443 
 444 
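      /*
       * Pushable -deadline tasks (queued, migratable, not running) are kept
       * in an rb-tree ordered by absolute deadline; the deadline of the
       * earliest one is cached in dl_rq->earliest_dl.next.
       */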
 445 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 446 {
 447         struct dl_rq *dl_rq = &rq->dl;
 448         struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
 449         struct rb_node *parent = NULL;
 450         struct task_struct *entry;
 451         bool leftmost = true;
 452 
 453         BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 454 
 455         while (*link) {
 456                 parent = *link;
 457                 entry = rb_entry(parent, struct task_struct,
 458                                  pushable_dl_tasks);
 459                 if (dl_entity_preempt(&p->dl, &entry->dl))
 460                         link = &parent->rb_left;
 461                 else {
 462                         link = &parent->rb_right;
 463                         leftmost = false;
 464                 }
 465         }
 466 
 467         if (leftmost)
 468                 dl_rq->earliest_dl.next = p->dl.deadline;
 469 
 470         rb_link_node(&p->pushable_dl_tasks, parent, link);
 471         rb_insert_color_cached(&p->pushable_dl_tasks,
 472                                &dl_rq->pushable_dl_tasks_root, leftmost);
 473 }
 474 
 475 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 476 {
 477         struct dl_rq *dl_rq = &rq->dl;
 478 
 479         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 480                 return;
 481 
 482         if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
 483                 struct rb_node *next_node;
 484 
 485                 next_node = rb_next(&p->pushable_dl_tasks);
 486                 if (next_node) {
 487                         dl_rq->earliest_dl.next = rb_entry(next_node,
 488                                 struct task_struct, pushable_dl_tasks)->dl.deadline;
 489                 }
 490         }
 491 
 492         rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 493         RB_CLEAR_NODE(&p->pushable_dl_tasks);
 494 }
 495 
 496 static inline int has_pushable_dl_tasks(struct rq *rq)
 497 {
 498         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 499 }
 500 
 501 static int push_dl_task(struct rq *rq);
 502 
 503 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 504 {
 505         return dl_task(prev);
 506 }
 507 
 508 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
 509 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
 510 
 511 static void push_dl_tasks(struct rq *);
 512 static void pull_dl_task(struct rq *);
 513 
 514 static inline void deadline_queue_push_tasks(struct rq *rq)
 515 {
 516         if (!has_pushable_dl_tasks(rq))
 517                 return;
 518 
 519         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
 520 }
 521 
 522 static inline void deadline_queue_pull_task(struct rq *rq)
 523 {
 524         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
 525 }
 526 
 527 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
 528 
 529 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
 530 {
 531         struct rq *later_rq = NULL;
 532         struct dl_bw *dl_b;
 533 
 534         later_rq = find_lock_later_rq(p, rq);
 535         if (!later_rq) {
 536                 int cpu;
 537 
 538                 
 539 
 540 
 541 
 542                 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 543                 if (cpu >= nr_cpu_ids) {
 544                         
 545 
 546 
 547 
 548                         BUG_ON(dl_bandwidth_enabled());
 549 
 550                         
 551 
 552 
 553 
 554 
 555                         cpu = cpumask_any(cpu_active_mask);
 556                 }
 557                 later_rq = cpu_rq(cpu);
 558                 double_lock_balance(rq, later_rq);
 559         }
 560 
 561         if (p->dl.dl_non_contending || p->dl.dl_throttled) {
 562                 
 563 
 564 
 565 
 566 
 567 
 568                 sub_running_bw(&p->dl, &rq->dl);
 569                 sub_rq_bw(&p->dl, &rq->dl);
 570 
 571                 add_rq_bw(&p->dl, &later_rq->dl);
 572                 add_running_bw(&p->dl, &later_rq->dl);
 573         } else {
 574                 sub_rq_bw(&p->dl, &rq->dl);
 575                 add_rq_bw(&p->dl, &later_rq->dl);
 576         }
 577 
 578         
 579 
 580 
 581 
 582 
 583         dl_b = &rq->rd->dl_bw;
 584         raw_spin_lock(&dl_b->lock);
 585         __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
 586         raw_spin_unlock(&dl_b->lock);
 587 
 588         dl_b = &later_rq->rd->dl_bw;
 589         raw_spin_lock(&dl_b->lock);
 590         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
 591         raw_spin_unlock(&dl_b->lock);
 592 
 593         set_task_cpu(p, later_rq->cpu);
 594         double_unlock_balance(later_rq, rq);
 595 
 596         return later_rq;
 597 }
 598 
 599 #else
 600 
 601 static inline
 602 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 603 {
 604 }
 605 
 606 static inline
 607 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 608 {
 609 }
 610 
 611 static inline
 612 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 613 {
 614 }
 615 
 616 static inline
 617 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 618 {
 619 }
 620 
 621 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 622 {
 623         return false;
 624 }
 625 
 626 static inline void pull_dl_task(struct rq *rq)
 627 {
 628 }
 629 
 630 static inline void deadline_queue_push_tasks(struct rq *rq)
 631 {
 632 }
 633 
 634 static inline void deadline_queue_pull_task(struct rq *rq)
 635 {
 636 }
 637 #endif 
 638 
 639 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 640 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
 641 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
 642 
 643 
 644 
 645 
 646 
 647 
 648 
 649 
 650 
 651 
 652 
 653 
 654 
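      /*
       * Give an entity whose absolute deadline has already elapsed a fresh
       * setup: deadline = now + dl_deadline and a full runtime budget.  If
       * the entity is throttled nothing is done here, since its parameters
       * will be set by the replenishment timer.
       */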
 655 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
 656 {
 657         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 658         struct rq *rq = rq_of_dl_rq(dl_rq);
 659 
 660         WARN_ON(dl_se->dl_boosted);
 661         WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
 662 
 663         
 664 
 665 
 666 
 667 
 668         if (dl_se->dl_throttled)
 669                 return;
 670 
 671         
 672 
 673 
 674 
 675 
 676         dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 677         dl_se->runtime = dl_se->dl_runtime;
 678 }
 679 
 680 
 681 
 682 
 683 
 684 
 685 
 686 
 687 
 688 
 689 
 690 
 691 
 692 
 693 
 694 
 695 
 696 
 697 
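      /*
       * CBS replenishment: push the absolute deadline forward by full
       * periods, adding a full runtime budget each time, until the runtime
       * becomes positive again.  When the task is boosted through a
       * rt-mutex the donor's parameters (pi_se) are used.  If the
       * replenishment has lagged far behind the current time, restart from
       * "now" instead.
       */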
 698 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
 699                                 struct sched_dl_entity *pi_se)
 700 {
 701         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 702         struct rq *rq = rq_of_dl_rq(dl_rq);
 703 
 704         BUG_ON(pi_se->dl_runtime <= 0);
 705 
 706         
 707 
 708 
 709 
 710         if (dl_se->dl_deadline == 0) {
 711                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 712                 dl_se->runtime = pi_se->dl_runtime;
 713         }
 714 
 715         if (dl_se->dl_yielded && dl_se->runtime > 0)
 716                 dl_se->runtime = 0;
 717 
 718         
 719 
 720 
 721 
 722 
 723 
 724         while (dl_se->runtime <= 0) {
 725                 dl_se->deadline += pi_se->dl_period;
 726                 dl_se->runtime += pi_se->dl_runtime;
 727         }
 728 
 729         
 730 
 731 
 732 
 733 
 734 
 735 
 736 
 737 
 738         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
 739                 printk_deferred_once("sched: DL replenish lagged too much\n");
 740                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 741                 dl_se->runtime = pi_se->dl_runtime;
 742         }
 743 
 744         if (dl_se->dl_yielded)
 745                 dl_se->dl_yielded = 0;
 746         if (dl_se->dl_throttled)
 747                 dl_se->dl_throttled = 0;
 748 }
 749 
 750 
 751 
 752 
 753 
 754 
 755 
 756 
 757 
 758 
 759 
 760 
 761 
 762 
 763 
 764 
 765 
 766 
 767 
 768 
 769 
 770 
 771 
 772 
 773 
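      /*
       * CBS wakeup rule: keeping the old deadline is safe only if the
       * residual bandwidth does not exceed the reserved one, i.e.
       *
       *     runtime / (deadline - t) <= dl_runtime / dl_deadline
       *
       * The check is done by cross-multiplication, shifting both factors
       * right by DL_SCALE first so the 64-bit products cannot overflow.
       */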
 774 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 775                                struct sched_dl_entity *pi_se, u64 t)
 776 {
 777         u64 left, right;
 778 
 779         
 780 
 781 
 782 
 783 
 784 
 785 
 786 
 787 
 788 
 789 
 790 
 791 
 792 
 793 
 794 
 795 
 796 
 797         left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
 798         right = ((dl_se->deadline - t) >> DL_SCALE) *
 799                 (pi_se->dl_runtime >> DL_SCALE);
 800 
 801         return dl_time_before(right, left);
 802 }
 803 
 804 
 805 
 806 
 807 
 808 
 809 
 810 
 811 
 812 
 813 
 814 
 815 
 816 
 817 
 818 
 819 
 820 
 821 
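      /*
       * Revised wakeup rule for constrained tasks whose old deadline is
       * still in the future: instead of granting a full runtime, scale the
       * runtime by the task's density (dl_runtime / dl_deadline) over the
       * remaining laxity, so that
       *
       *     runtime / (deadline - now) <= dl_runtime / dl_deadline
       *
       * keeps holding with the old deadline.
       */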
 822 static void
 823 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
 824 {
 825         u64 laxity = dl_se->deadline - rq_clock(rq);
 826 
 827         
 828 
 829 
 830 
 831 
 832 
 833         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
 834 
 835         dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
 836 }
 837 
 838 
 839 
 840 
 841 
 842 
 843 
 844 
 845 
 846 
 847 
 848 
 849 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
 850 {
 851         return dl_se->dl_deadline == dl_se->dl_period;
 852 }
 853 
 854 
 855 
 856 
 857 
 858 
 859 
 860 
 861 
 862 
 863 
 864 
 865 
 866 
 867 
 868 
 869 
 870 
 871 
 872 
 873 
 874 
 875 
 876 
 877 
 878 
 879 
 880 
 881 
 882 
 883 
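      /*
       * On wakeup, the old deadline and runtime are kept only if they do
       * not break the bandwidth guarantee (see dl_entity_overflow()).
       * Otherwise the entity gets a new deadline (now + dl_deadline) and a
       * full runtime; constrained, non-boosted entities whose old deadline
       * has not yet passed are handled by the revised wakeup rule instead.
       */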
 884 static void update_dl_entity(struct sched_dl_entity *dl_se,
 885                              struct sched_dl_entity *pi_se)
 886 {
 887         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
 888         struct rq *rq = rq_of_dl_rq(dl_rq);
 889 
 890         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
 891             dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
 892 
 893                 if (unlikely(!dl_is_implicit(dl_se) &&
 894                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
 895                              !dl_se->dl_boosted)){
 896                         update_dl_revised_wakeup(dl_se, rq);
 897                         return;
 898                 }
 899 
 900                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
 901                 dl_se->runtime = pi_se->dl_runtime;
 902         }
 903 }
 904 
 905 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
 906 {
 907         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
 908 }
 909 
 910 
 911 
 912 
 913 
 914 
 915 
 916 
 917 
 918 
 919 
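      /*
       * Arm dl_timer to fire at the beginning of the next period, when the
       * throttled task will have its runtime replenished.  A reference on
       * the task is held while the timer is queued.  Returns 0, doing
       * nothing, if the replenishment instant is already in the past.
       */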
 920 static int start_dl_timer(struct task_struct *p)
 921 {
 922         struct sched_dl_entity *dl_se = &p->dl;
 923         struct hrtimer *timer = &dl_se->dl_timer;
 924         struct rq *rq = task_rq(p);
 925         ktime_t now, act;
 926         s64 delta;
 927 
 928         lockdep_assert_held(&rq->lock);
 929 
 930         
 931 
 932 
 933 
 934 
 935         act = ns_to_ktime(dl_next_period(dl_se));
 936         now = hrtimer_cb_get_time(timer);
 937         delta = ktime_to_ns(now) - rq_clock(rq);
 938         act = ktime_add_ns(act, delta);
 939 
 940         
 941 
 942 
 943 
 944 
 945         if (ktime_us_delta(act, now) < 0)
 946                 return 0;
 947 
 948         
 949 
 950 
 951 
 952 
 953 
 954 
 955 
 956 
 957         if (!hrtimer_is_queued(timer)) {
 958                 get_task_struct(p);
 959                 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
 960         }
 961 
 962         return 1;
 963 }
 964 
 965 
 966 
 967 
 968 
 969 
 970 
 971 
 972 
 973 
 974 
 975 
 976 
 977 
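      /*
       * Replenishment timer handler: fires at the start of the next period
       * of a throttled task.  It replenishes the runtime, re-enqueues the
       * task and checks for preemption, migrating the task away first if
       * this runqueue went offline in the meantime.
       */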
 978 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 979 {
 980         struct sched_dl_entity *dl_se = container_of(timer,
 981                                                      struct sched_dl_entity,
 982                                                      dl_timer);
 983         struct task_struct *p = dl_task_of(dl_se);
 984         struct rq_flags rf;
 985         struct rq *rq;
 986 
 987         rq = task_rq_lock(p, &rf);
 988 
 989         
 990 
 991 
 992 
 993         if (!dl_task(p))
 994                 goto unlock;
 995 
 996         
 997 
 998 
 999 
1000         if (dl_se->dl_boosted)
1001                 goto unlock;
1002 
1003         
1004 
1005 
1006 
1007         if (!dl_se->dl_throttled)
1008                 goto unlock;
1009 
1010         sched_clock_tick();
1011         update_rq_clock(rq);
1012 
1013         
1014 
1015 
1016 
1017 
1018 
1019 
1020 
1021 
1022 
1023 
1024 
1025 
1026 
1027         if (!task_on_rq_queued(p)) {
1028                 replenish_dl_entity(dl_se, dl_se);
1029                 goto unlock;
1030         }
1031 
1032 #ifdef CONFIG_SMP
1033         if (unlikely(!rq->online)) {
1034                 
1035 
1036 
1037 
1038                 lockdep_unpin_lock(&rq->lock, rf.cookie);
1039                 rq = dl_task_offline_migration(rq, p);
1040                 rf.cookie = lockdep_pin_lock(&rq->lock);
1041                 update_rq_clock(rq);
1042 
1043                 
1044 
1045 
1046 
1047 
1048         }
1049 #endif
1050 
1051         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1052         if (dl_task(rq->curr))
1053                 check_preempt_curr_dl(rq, p, 0);
1054         else
1055                 resched_curr(rq);
1056 
1057 #ifdef CONFIG_SMP
1058         
1059 
1060 
1061 
1062         if (has_pushable_dl_tasks(rq)) {
1063                 
1064 
1065 
1066 
1067                 rq_unpin_lock(rq, &rf);
1068                 push_dl_task(rq);
1069                 rq_repin_lock(rq, &rf);
1070         }
1071 #endif
1072 
1073 unlock:
1074         task_rq_unlock(rq, p, &rf);
1075 
1076         
1077 
1078 
1079 
1080         put_task_struct(p);
1081 
1082         return HRTIMER_NORESTART;
1083 }
1084 
1085 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1086 {
1087         struct hrtimer *timer = &dl_se->dl_timer;
1088 
1089         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1090         timer->function = dl_task_timer;
1091 }
1092 
1093 
1094 
1095 
1096 
1097 
1098 
1099 
1100 
1101 
1102 
1103 
1104 
1105 
1106 
1107 
1108 
1109 
1110 
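      /*
       * A constrained task (dl_deadline < dl_period) activating after its
       * deadline but before the end of its period must not receive new
       * runtime immediately: unless it is boosted, throttle it and let
       * dl_timer release it at the start of the next period.
       */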
1111 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1112 {
1113         struct task_struct *p = dl_task_of(dl_se);
1114         struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1115 
1116         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1117             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1118                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1119                         return;
1120                 dl_se->dl_throttled = 1;
1121                 if (dl_se->runtime > 0)
1122                         dl_se->runtime = 0;
1123         }
1124 }
1125 
1126 static
1127 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1128 {
1129         return (dl_se->runtime <= 0);
1130 }
1131 
1132 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
1133 
1134 
1135 
1136 
1137 
1138 
1139 
1140 
1141 
1142 
1143 
1144 
1145 
1146 
1147 
1148 
1149 
1150 
1151 
1152 
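      /*
       * GRUB reclaiming: rather than charging the consumed time at the full
       * rate (dq = -dt), charge it in proportion to the utilization that is
       * actually active:
       *
       *     dq = -max{ u/Umax, 1 - Uinact - Uextra } dt
       *
       * where Uinact = this_bw - running_bw is the utilization of blocked
       * but not yet inactive tasks, Uextra is the bandwidth left unreserved
       * on this runqueue, and u/Umax (u_act_min) bounds how much a single
       * task may reclaim.
       */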
1153 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1154 {
1155         u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; 
1156         u64 u_act;
1157         u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1158 
1159         
1160 
1161 
1162 
1163 
1164 
1165 
1166 
1167         if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1168                 u_act = u_act_min;
1169         else
1170                 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1171 
1172         return (delta * u_act) >> BW_SHIFT;
1173 }
1174 
1175 
1176 
1177 
1178 
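      /*
       * Update the current -deadline task's runtime statistics: account the
       * time it just ran, subtract it (GRUB-scaled, or scaled by CPU
       * capacity and frequency) from the remaining runtime, and throttle
       * the task, arming the replenishment timer, once the runtime is
       * exhausted.  The time is also charged to the -rt bandwidth, which is
       * currently shared with -deadline tasks.
       */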
1179 static void update_curr_dl(struct rq *rq)
1180 {
1181         struct task_struct *curr = rq->curr;
1182         struct sched_dl_entity *dl_se = &curr->dl;
1183         u64 delta_exec, scaled_delta_exec;
1184         int cpu = cpu_of(rq);
1185         u64 now;
1186 
1187         if (!dl_task(curr) || !on_dl_rq(dl_se))
1188                 return;
1189 
1190         
1191 
1192 
1193 
1194 
1195 
1196 
1197 
1198         now = rq_clock_task(rq);
1199         delta_exec = now - curr->se.exec_start;
1200         if (unlikely((s64)delta_exec <= 0)) {
1201                 if (unlikely(dl_se->dl_yielded))
1202                         goto throttle;
1203                 return;
1204         }
1205 
1206         schedstat_set(curr->se.statistics.exec_max,
1207                       max(curr->se.statistics.exec_max, delta_exec));
1208 
1209         curr->se.sum_exec_runtime += delta_exec;
1210         account_group_exec_runtime(curr, delta_exec);
1211 
1212         curr->se.exec_start = now;
1213         cgroup_account_cputime(curr, delta_exec);
1214 
1215         if (dl_entity_is_special(dl_se))
1216                 return;
1217 
1218         
1219 
1220 
1221 
1222 
1223 
1224 
1225         if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1226                 scaled_delta_exec = grub_reclaim(delta_exec,
1227                                                  rq,
1228                                                  &curr->dl);
1229         } else {
1230                 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1231                 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1232 
1233                 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1234                 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1235         }
1236 
1237         dl_se->runtime -= scaled_delta_exec;
1238 
1239 throttle:
1240         if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1241                 dl_se->dl_throttled = 1;
1242 
1243                 
1244                 if (dl_runtime_exceeded(dl_se) &&
1245                     (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1246                         dl_se->dl_overrun = 1;
1247 
1248                 __dequeue_task_dl(rq, curr, 0);
1249                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1250                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1251 
1252                 if (!is_leftmost(curr, &rq->dl))
1253                         resched_curr(rq);
1254         }
1255 
1256         
1257 
1258 
1259 
1260 
1261 
1262 
1263 
1264 
1265 
1266 
1267         if (rt_bandwidth_enabled()) {
1268                 struct rt_rq *rt_rq = &rq->rt;
1269 
1270                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1271                 
1272 
1273 
1274 
1275 
1276                 if (sched_rt_bandwidth_account(rt_rq))
1277                         rt_rq->rt_time += delta_exec;
1278                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1279         }
1280 }
1281 
1282 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1283 {
1284         struct sched_dl_entity *dl_se = container_of(timer,
1285                                                      struct sched_dl_entity,
1286                                                      inactive_timer);
1287         struct task_struct *p = dl_task_of(dl_se);
1288         struct rq_flags rf;
1289         struct rq *rq;
1290 
1291         rq = task_rq_lock(p, &rf);
1292 
1293         sched_clock_tick();
1294         update_rq_clock(rq);
1295 
1296         if (!dl_task(p) || p->state == TASK_DEAD) {
1297                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1298 
1299                 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1300                         sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1301                         sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1302                         dl_se->dl_non_contending = 0;
1303                 }
1304 
1305                 raw_spin_lock(&dl_b->lock);
1306                 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1307                 raw_spin_unlock(&dl_b->lock);
1308                 __dl_clear_params(p);
1309 
1310                 goto unlock;
1311         }
1312         if (dl_se->dl_non_contending == 0)
1313                 goto unlock;
1314 
1315         sub_running_bw(dl_se, &rq->dl);
1316         dl_se->dl_non_contending = 0;
1317 unlock:
1318         task_rq_unlock(rq, p, &rf);
1319         put_task_struct(p);
1320 
1321         return HRTIMER_NORESTART;
1322 }
1323 
1324 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1325 {
1326         struct hrtimer *timer = &dl_se->inactive_timer;
1327 
1328         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1329         timer->function = inactive_task_timer;
1330 }
1331 
1332 #ifdef CONFIG_SMP
1333 
1334 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1335 {
1336         struct rq *rq = rq_of_dl_rq(dl_rq);
1337 
1338         if (dl_rq->earliest_dl.curr == 0 ||
1339             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1340                 dl_rq->earliest_dl.curr = deadline;
1341                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1342         }
1343 }
1344 
1345 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1346 {
1347         struct rq *rq = rq_of_dl_rq(dl_rq);
1348 
1349         
1350 
1351 
1352 
1353         if (!dl_rq->dl_nr_running) {
1354                 dl_rq->earliest_dl.curr = 0;
1355                 dl_rq->earliest_dl.next = 0;
1356                 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1357         } else {
1358                 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1359                 struct sched_dl_entity *entry;
1360 
1361                 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1362                 dl_rq->earliest_dl.curr = entry->deadline;
1363                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1364         }
1365 }
1366 
1367 #else
1368 
1369 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1370 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1371 
1372 #endif 
1373 
1374 static inline
1375 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1376 {
1377         int prio = dl_task_of(dl_se)->prio;
1378         u64 deadline = dl_se->deadline;
1379 
1380         WARN_ON(!dl_prio(prio));
1381         dl_rq->dl_nr_running++;
1382         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1383 
1384         inc_dl_deadline(dl_rq, deadline);
1385         inc_dl_migration(dl_se, dl_rq);
1386 }
1387 
1388 static inline
1389 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1390 {
1391         int prio = dl_task_of(dl_se)->prio;
1392 
1393         WARN_ON(!dl_prio(prio));
1394         WARN_ON(!dl_rq->dl_nr_running);
1395         dl_rq->dl_nr_running--;
1396         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1397 
1398         dec_dl_deadline(dl_rq, dl_se->deadline);
1399         dec_dl_migration(dl_se, dl_rq);
1400 }
1401 
1402 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1403 {
1404         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1405         struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1406         struct rb_node *parent = NULL;
1407         struct sched_dl_entity *entry;
1408         int leftmost = 1;
1409 
1410         BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1411 
1412         while (*link) {
1413                 parent = *link;
1414                 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1415                 if (dl_time_before(dl_se->deadline, entry->deadline))
1416                         link = &parent->rb_left;
1417                 else {
1418                         link = &parent->rb_right;
1419                         leftmost = 0;
1420                 }
1421         }
1422 
1423         rb_link_node(&dl_se->rb_node, parent, link);
1424         rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1425 
1426         inc_dl_tasks(dl_se, dl_rq);
1427 }
1428 
1429 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1430 {
1431         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1432 
1433         if (RB_EMPTY_NODE(&dl_se->rb_node))
1434                 return;
1435 
1436         rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1437         RB_CLEAR_NODE(&dl_se->rb_node);
1438 
1439         dec_dl_tasks(dl_se, dl_rq);
1440 }
1441 
1442 static void
1443 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1444                   struct sched_dl_entity *pi_se, int flags)
1445 {
1446         BUG_ON(on_dl_rq(dl_se));
1447 
1448         
1449 
1450 
1451 
1452 
1453         if (flags & ENQUEUE_WAKEUP) {
1454                 task_contending(dl_se, flags);
1455                 update_dl_entity(dl_se, pi_se);
1456         } else if (flags & ENQUEUE_REPLENISH) {
1457                 replenish_dl_entity(dl_se, pi_se);
1458         } else if ((flags & ENQUEUE_RESTORE) &&
1459                   dl_time_before(dl_se->deadline,
1460                                  rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1461                 setup_new_dl_entity(dl_se);
1462         }
1463 
1464         __enqueue_dl_entity(dl_se);
1465 }
1466 
1467 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1468 {
1469         __dequeue_dl_entity(dl_se);
1470 }
1471 
1472 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1473 {
1474         struct task_struct *pi_task = rt_mutex_get_top_task(p);
1475         struct sched_dl_entity *pi_se = &p->dl;
1476 
1477         
1478 
1479 
1480 
1481 
1482 
1483 
1484 
1485         if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1486                 pi_se = &pi_task->dl;
1487         } else if (!dl_prio(p->normal_prio)) {
1488                 
1489 
1490 
1491 
1492 
1493 
1494 
1495                 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1496                 return;
1497         }
1498 
1499         
1500 
1501 
1502 
1503 
1504 
1505         if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1506                 dl_check_constrained_dl(&p->dl);
1507 
1508         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1509                 add_rq_bw(&p->dl, &rq->dl);
1510                 add_running_bw(&p->dl, &rq->dl);
1511         }
1512 
1513         
1514 
1515 
1516 
1517 
1518 
1519 
1520 
1521 
1522 
1523 
1524 
1525         if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1526                 if (flags & ENQUEUE_WAKEUP)
1527                         task_contending(&p->dl, flags);
1528 
1529                 return;
1530         }
1531 
1532         enqueue_dl_entity(&p->dl, pi_se, flags);
1533 
1534         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1535                 enqueue_pushable_dl_task(rq, p);
1536 }
1537 
1538 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1539 {
1540         dequeue_dl_entity(&p->dl);
1541         dequeue_pushable_dl_task(rq, p);
1542 }
1543 
1544 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1545 {
1546         update_curr_dl(rq);
1547         __dequeue_task_dl(rq, p, flags);
1548 
1549         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1550                 sub_running_bw(&p->dl, &rq->dl);
1551                 sub_rq_bw(&p->dl, &rq->dl);
1552         }
1553 
1554         
1555 
1556 
1557 
1558 
1559 
1560 
1561 
1562 
1563         if (flags & DEQUEUE_SLEEP)
1564                 task_non_contending(p);
1565 }
1566 
1567 
1568 
1569 
1570 
1571 
1572 
1573 
1574 
1575 
1576 
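      /*
       * Yielding a -deadline task forfeits whatever runtime is left in the
       * current instance: dl_yielded makes update_curr_dl() throttle the
       * task until the next replenishment.
       */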
1577 static void yield_task_dl(struct rq *rq)
1578 {
1579         
1580 
1581 
1582 
1583 
1584 
1585         rq->curr->dl.dl_yielded = 1;
1586 
1587         update_rq_clock(rq);
1588         update_curr_dl(rq);
1589         
1590 
1591 
1592 
1593 
1594         rq_clock_skip_update(rq);
1595 }
1596 
1597 #ifdef CONFIG_SMP
1598 
1599 static int find_later_rq(struct task_struct *task);
1600 
1601 static int
1602 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1603 {
1604         struct task_struct *curr;
1605         struct rq *rq;
1606 
1607         if (sd_flag != SD_BALANCE_WAKE)
1608                 goto out;
1609 
1610         rq = cpu_rq(cpu);
1611 
1612         rcu_read_lock();
1613         curr = READ_ONCE(rq->curr); 
1614 
1615         
1616 
1617 
1618 
1619 
1620 
1621 
1622 
1623 
1624         if (unlikely(dl_task(curr)) &&
1625             (curr->nr_cpus_allowed < 2 ||
1626              !dl_entity_preempt(&p->dl, &curr->dl)) &&
1627             (p->nr_cpus_allowed > 1)) {
1628                 int target = find_later_rq(p);
1629 
1630                 if (target != -1 &&
1631                                 (dl_time_before(p->dl.deadline,
1632                                         cpu_rq(target)->dl.earliest_dl.curr) ||
1633                                 (cpu_rq(target)->dl.dl_nr_running == 0)))
1634                         cpu = target;
1635         }
1636         rcu_read_unlock();
1637 
1638 out:
1639         return cpu;
1640 }
1641 
1642 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1643 {
1644         struct rq *rq;
1645 
1646         if (p->state != TASK_WAKING)
1647                 return;
1648 
1649         rq = task_rq(p);
1650         
1651 
1652 
1653 
1654 
1655         raw_spin_lock(&rq->lock);
1656         if (p->dl.dl_non_contending) {
1657                 sub_running_bw(&p->dl, &rq->dl);
1658                 p->dl.dl_non_contending = 0;
1659                 
1660 
1661 
1662 
1663 
1664 
1665 
1666                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1667                         put_task_struct(p);
1668         }
1669         sub_rq_bw(&p->dl, &rq->dl);
1670         raw_spin_unlock(&rq->lock);
1671 }
1672 
1673 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1674 {
1675         
1676 
1677 
1678 
1679         if (rq->curr->nr_cpus_allowed == 1 ||
1680             !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1681                 return;
1682 
1683         
1684 
1685 
1686 
1687         if (p->nr_cpus_allowed != 1 &&
1688             cpudl_find(&rq->rd->cpudl, p, NULL))
1689                 return;
1690 
1691         resched_curr(rq);
1692 }
1693 
1694 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1695 {
1696         if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1697                 
1698 
1699 
1700 
1701 
1702 
1703                 rq_unpin_lock(rq, rf);
1704                 pull_dl_task(rq);
1705                 rq_repin_lock(rq, rf);
1706         }
1707 
1708         return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1709 }
1710 #endif 
1711 
1712 
1713 
1714 
1715 
1716 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1717                                   int flags)
1718 {
1719         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1720                 resched_curr(rq);
1721                 return;
1722         }
1723 
1724 #ifdef CONFIG_SMP
1725         
1726 
1727 
1728 
1729         if ((p->dl.deadline == rq->curr->dl.deadline) &&
1730             !test_tsk_need_resched(rq->curr))
1731                 check_preempt_equal_dl(rq, p);
1732 #endif 
1733 }
1734 
1735 #ifdef CONFIG_SCHED_HRTICK
1736 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1737 {
1738         hrtick_start(rq, p->dl.runtime);
1739 }
1740 #else 
1741 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1742 {
1743 }
1744 #endif
1745 
1746 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
1747 {
1748         p->se.exec_start = rq_clock_task(rq);
1749 
1750         
1751         dequeue_pushable_dl_task(rq, p);
1752 
1753         if (!first)
1754                 return;
1755 
1756         if (hrtick_enabled(rq))
1757                 start_hrtick_dl(rq, p);
1758 
1759         if (rq->curr->sched_class != &dl_sched_class)
1760                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1761 
1762         deadline_queue_push_tasks(rq);
1763 }
1764 
1765 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1766                                                    struct dl_rq *dl_rq)
1767 {
1768         struct rb_node *left = rb_first_cached(&dl_rq->root);
1769 
1770         if (!left)
1771                 return NULL;
1772 
1773         return rb_entry(left, struct sched_dl_entity, rb_node);
1774 }
1775 
1776 static struct task_struct *
1777 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1778 {
1779         struct sched_dl_entity *dl_se;
1780         struct dl_rq *dl_rq = &rq->dl;
1781         struct task_struct *p;
1782 
1783         WARN_ON_ONCE(prev || rf);
1784 
1785         if (!sched_dl_runnable(rq))
1786                 return NULL;
1787 
1788         dl_se = pick_next_dl_entity(rq, dl_rq);
1789         BUG_ON(!dl_se);
1790         p = dl_task_of(dl_se);
1791         set_next_task_dl(rq, p, true);
1792         return p;
1793 }
1794 
1795 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1796 {
1797         update_curr_dl(rq);
1798 
1799         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1800         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1801                 enqueue_pushable_dl_task(rq, p);
1802 }
1803 
1804 
1805 
1806 
1807 
1808 
1809 
1810 
1811 
1812 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1813 {
1814         update_curr_dl(rq);
1815 
1816         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1817         
1818 
1819 
1820 
1821 
1822         if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1823             is_leftmost(p, &rq->dl))
1824                 start_hrtick_dl(rq, p);
1825 }
1826 
1827 static void task_fork_dl(struct task_struct *p)
1828 {
1829         
1830 
1831 
1832 
1833 }
1834 
1835 #ifdef CONFIG_SMP
1836 
1837 
1838 #define DL_MAX_TRIES 3
1839 
1840 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1841 {
1842         if (!task_running(rq, p) &&
1843             cpumask_test_cpu(cpu, p->cpus_ptr))
1844                 return 1;
1845         return 0;
1846 }
1847 
1848 
1849 
1850 
1851 
1852 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1853 {
1854         struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1855         struct task_struct *p = NULL;
1856 
1857         if (!has_pushable_dl_tasks(rq))
1858                 return NULL;
1859 
1860 next_node:
1861         if (next_node) {
1862                 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1863 
1864                 if (pick_dl_task(rq, p, cpu))
1865                         return p;
1866 
1867                 next_node = rb_next(next_node);
1868                 goto next_node;
1869         }
1870 
1871         return NULL;
1872 }
1873 
1874 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1875 
1876 static int find_later_rq(struct task_struct *task)
1877 {
1878         struct sched_domain *sd;
1879         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1880         int this_cpu = smp_processor_id();
1881         int cpu = task_cpu(task);
1882 
1883         
1884         if (unlikely(!later_mask))
1885                 return -1;
1886 
1887         if (task->nr_cpus_allowed == 1)
1888                 return -1;
1889 
1890         
1891 
1892 
1893 
1894         if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1895                 return -1;
1896 
1897         
1898 
1899 
1900 
1901 
1902 
1903 
1904 
1905 
1906 
1907 
1908 
1909         if (cpumask_test_cpu(cpu, later_mask))
1910                 return cpu;
1911         
1912 
1913 
1914 
1915         if (!cpumask_test_cpu(this_cpu, later_mask))
1916                 this_cpu = -1;
1917 
1918         rcu_read_lock();
1919         for_each_domain(cpu, sd) {
1920                 if (sd->flags & SD_WAKE_AFFINE) {
1921                         int best_cpu;
1922 
1923                         
1924 
1925 
1926 
1927                         if (this_cpu != -1 &&
1928                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1929                                 rcu_read_unlock();
1930                                 return this_cpu;
1931                         }
1932 
1933                         best_cpu = cpumask_first_and(later_mask,
1934                                                         sched_domain_span(sd));
1935                         
1936 
1937 
1938 
1939 
1940 
1941                         if (best_cpu < nr_cpu_ids) {
1942                                 rcu_read_unlock();
1943                                 return best_cpu;
1944                         }
1945                 }
1946         }
1947         rcu_read_unlock();
1948 
1949         
1950 
1951 
1952 
1953         if (this_cpu != -1)
1954                 return this_cpu;
1955 
1956         cpu = cpumask_any(later_mask);
1957         if (cpu < nr_cpu_ids)
1958                 return cpu;
1959 
1960         return -1;
1961 }
1962 
1963 
1964 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1965 {
1966         struct rq *later_rq = NULL;
1967         int tries;
1968         int cpu;
1969 
1970         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1971                 cpu = find_later_rq(task);
1972 
1973                 if ((cpu == -1) || (cpu == rq->cpu))
1974                         break;
1975 
1976                 later_rq = cpu_rq(cpu);
1977 
1978                 if (later_rq->dl.dl_nr_running &&
1979                     !dl_time_before(task->dl.deadline,
1980                                         later_rq->dl.earliest_dl.curr)) {
1981                         
1982 
1983 
1984 
1985 
1986                         later_rq = NULL;
1987                         break;
1988                 }
1989 
1990                 
1991                 if (double_lock_balance(rq, later_rq)) {
1992                         if (unlikely(task_rq(task) != rq ||
1993                                      !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
1994                                      task_running(rq, task) ||
1995                                      !dl_task(task) ||
1996                                      !task_on_rq_queued(task))) {
1997                                 double_unlock_balance(rq, later_rq);
1998                                 later_rq = NULL;
1999                                 break;
2000                         }
2001                 }
2002 
2003                 
2004 
2005 
2006 
2007 
2008                 if (!later_rq->dl.dl_nr_running ||
2009                     dl_time_before(task->dl.deadline,
2010                                    later_rq->dl.earliest_dl.curr))
2011                         break;
2012 
2013                 
2014                 double_unlock_balance(rq, later_rq);
2015                 later_rq = NULL;
2016         }
2017 
2018         return later_rq;
2019 }
2020 
2021 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2022 {
2023         struct task_struct *p;
2024 
2025         if (!has_pushable_dl_tasks(rq))
2026                 return NULL;
2027 
2028         p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2029                      struct task_struct, pushable_dl_tasks);
2030 
2031         BUG_ON(rq->cpu != task_cpu(p));
2032         BUG_ON(task_current(rq, p));
2033         BUG_ON(p->nr_cpus_allowed <= 1);
2034 
2035         BUG_ON(!task_on_rq_queued(p));
2036         BUG_ON(!dl_task(p));
2037 
2038         return p;
2039 }
2040 
2041 
2042 
2043 
2044 
2045 
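      /*
       * See if the non-running, earliest-deadline pushable task on this
       * runqueue can be moved to another CPU where it would preempt, i.e.
       * where its deadline is earlier than the earliest queued one (or the
       * target has no -deadline tasks at all).
       */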
2046 static int push_dl_task(struct rq *rq)
2047 {
2048         struct task_struct *next_task;
2049         struct rq *later_rq;
2050         int ret = 0;
2051 
2052         if (!rq->dl.overloaded)
2053                 return 0;
2054 
2055         next_task = pick_next_pushable_dl_task(rq);
2056         if (!next_task)
2057                 return 0;
2058 
2059 retry:
2060         if (WARN_ON(next_task == rq->curr))
2061                 return 0;
2062 
2063         
2064 
2065 
2066 
2067 
2068         if (dl_task(rq->curr) &&
2069             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2070             rq->curr->nr_cpus_allowed > 1) {
2071                 resched_curr(rq);
2072                 return 0;
2073         }
2074 
2075         
2076         get_task_struct(next_task);
2077 
2078         
2079         later_rq = find_lock_later_rq(next_task, rq);
2080         if (!later_rq) {
2081                 struct task_struct *task;
2082 
2083                 
2084 
2085 
2086 
2087 
2088                 task = pick_next_pushable_dl_task(rq);
2089                 if (task == next_task) {
2090                         
2091 
2092 
2093 
2094                         goto out;
2095                 }
2096 
2097                 if (!task)
2098                         
2099                         goto out;
2100 
2101                 put_task_struct(next_task);
2102                 next_task = task;
2103                 goto retry;
2104         }
2105 
2106         deactivate_task(rq, next_task, 0);
2107         set_task_cpu(next_task, later_rq->cpu);
2108 
2109         
2110 
2111 
2112 
2113         update_rq_clock(later_rq);
2114         activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2115         ret = 1;
2116 
2117         resched_curr(later_rq);
2118 
2119         double_unlock_balance(rq, later_rq);
2120 
2121 out:
2122         put_task_struct(next_task);
2123 
2124         return ret;
2125 }
2126 
2127 static void push_dl_tasks(struct rq *rq)
2128 {
2129         
2130         while (push_dl_task(rq))
2131                 ;
2132 }
2133 
2134 static void pull_dl_task(struct rq *this_rq)
2135 {
2136         int this_cpu = this_rq->cpu, cpu;
2137         struct task_struct *p;
2138         bool resched = false;
2139         struct rq *src_rq;
2140         u64 dmin = LONG_MAX;
2141 
2142         if (likely(!dl_overloaded(this_rq)))
2143                 return;
2144 
2145         
2146 
2147 
2148 
2149         smp_rmb();
2150 
2151         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2152                 if (this_cpu == cpu)
2153                         continue;
2154 
2155                 src_rq = cpu_rq(cpu);
2156 
2157                 
2158 
2159 
2160 
2161                 if (this_rq->dl.dl_nr_running &&
2162                     dl_time_before(this_rq->dl.earliest_dl.curr,
2163                                    src_rq->dl.earliest_dl.next))
2164                         continue;
2165 
2166                 
2167                 double_lock_balance(this_rq, src_rq);
2168 
2169                 
2170 
2171 
2172 
2173                 if (src_rq->dl.dl_nr_running <= 1)
2174                         goto skip;
2175 
2176                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2177 
2178                 /*
2179                  * We found a task to be pulled if:
2180                  *  - it preempts our current (if there is one),
2181                  *  - it will preempt the last one we pulled (if any).
2182                  */
2183                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2184                     (!this_rq->dl.dl_nr_running ||
2185                      dl_time_before(p->dl.deadline,
2186                                     this_rq->dl.earliest_dl.curr))) {
2187                         WARN_ON(p == src_rq->curr);
2188                         WARN_ON(!task_on_rq_queued(p));
2189 
2190                         /*
2191                          * Then we pull iff p has actually an earlier
2192                          * deadline than the current task of its runqueue.
2193                          */
2194                         if (dl_time_before(p->dl.deadline,
2195                                            src_rq->curr->dl.deadline))
2196                                 goto skip;
2197 
2198                         resched = true;
2199 
2200                         deactivate_task(src_rq, p, 0);
2201                         set_task_cpu(p, this_cpu);
2202                         activate_task(this_rq, p, 0);
2203                         dmin = p->dl.deadline;
2204 
2205                         /* Is there any other task even earlier? */
2206                 }
2207 skip:
2208                 double_unlock_balance(this_rq, src_rq);
2209         }
2210 
2211         if (resched)
2212                 resched_curr(this_rq);
2213 }
2214 
2215 /*
2216  * Since the task is not running and a reschedule is not going to happen
2217  * anytime soon on its runqueue, we try pushing it away now.
2218  */
2219 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2220 {
2221         if (!task_running(rq, p) &&
2222             !test_tsk_need_resched(rq->curr) &&
2223             p->nr_cpus_allowed > 1 &&
2224             dl_task(rq->curr) &&
2225             (rq->curr->nr_cpus_allowed < 2 ||
2226              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2227                 push_dl_tasks(rq);
2228         }
2229 }
2230 
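     /*
      * Update a -deadline task's CPU affinity. If the new mask no longer
      * intersects the current root_domain's span, the task's bandwidth is
      * released from that root_domain before the common affinity update.
      */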
2231 static void set_cpus_allowed_dl(struct task_struct *p,
2232                                 const struct cpumask *new_mask)
2233 {
2234         struct root_domain *src_rd;
2235         struct rq *rq;
2236 
2237         BUG_ON(!dl_task(p));
2238 
2239         rq = task_rq(p);
2240         src_rd = rq->rd;
2241         /*
2242          * Migrating a SCHED_DEADLINE task between exclusive cpusets
2243          * (different root_domains) entails a bandwidth update; space in
2244          * the destination was already reserved (see dl_task_can_attach()).
2245          */
2246 
2247         if (!cpumask_intersects(src_rd->span, new_mask)) {
2248                 struct dl_bw *src_dl_b;
2249 
2250                 src_dl_b = dl_bw_of(cpu_of(rq));
2251                 /*
2252                  * We now free resources of the root_domain we are migrating
2253                  * off. In the worst case, sched_setattr() may temporarily
2254                  * fail until we complete the update.
2255                  */
2256                 raw_spin_lock(&src_dl_b->lock);
2257                 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2258                 raw_spin_unlock(&src_dl_b->lock);
2259         }
2260 
2261         set_cpus_allowed_common(p, new_mask);
2262 }
2263 
2264 /* Assumes rq->lock is held */
2265 static void rq_online_dl(struct rq *rq)
2266 {
2267         if (rq->dl.overloaded)
2268                 dl_set_overload(rq);
2269 
2270         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2271         if (rq->dl.dl_nr_running > 0)
2272                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2273 }
2274 
2275 /* Assumes rq->lock is held */
2276 static void rq_offline_dl(struct rq *rq)
2277 {
2278         if (rq->dl.overloaded)
2279                 dl_clear_overload(rq);
2280 
2281         cpudl_clear(&rq->rd->cpudl, rq->cpu);
2282         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2283 }
2284 
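     /* Allocate the per-CPU cpumask used as scratch space by find_later_rq(). */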
2285 void __init init_sched_dl_class(void)
2286 {
2287         unsigned int i;
2288 
2289         for_each_possible_cpu(i)
2290                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2291                                         GFP_KERNEL, cpu_to_node(i));
2292 }
2293 
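     /*
      * Account p's bandwidth in the dl_bw of the root_domain its runqueue
      * currently belongs to; this is a no-op for non -deadline tasks.
      */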
2294 void dl_add_task_root_domain(struct task_struct *p)
2295 {
2296         struct rq_flags rf;
2297         struct rq *rq;
2298         struct dl_bw *dl_b;
2299 
2300         rq = task_rq_lock(p, &rf);
2301         if (!dl_task(p))
2302                 goto unlock;
2303 
2304         dl_b = &rq->rd->dl_bw;
2305         raw_spin_lock(&dl_b->lock);
2306 
2307         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2308 
2309         raw_spin_unlock(&dl_b->lock);
2310 
2311 unlock:
2312         task_rq_unlock(rq, p, &rf);
2313 }
2314 
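     /* Reset the total -deadline bandwidth accounted in this root_domain. */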
2315 void dl_clear_root_domain(struct root_domain *rd)
2316 {
2317         unsigned long flags;
2318 
2319         raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2320         rd->dl_bw.total_bw = 0;
2321         raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2322 }
2323 
2324 #endif /* CONFIG_SMP */
2325 
2326 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2327 {
2328         /*
2329          * task_non_contending() can start the "inactive timer" (if the
2330          * 0-lag time is in the future) and the timer will fire at the
2331          * "0-lag time", removing the task's utilization from running_bw
2332          * at that point. If the 0-lag time has already passed, the
2333          * utilization is removed right away. Only queued tasks with a
2334          * valid runtime need this.
2335          */
2336         if (task_on_rq_queued(p) && p->dl.dl_runtime)
2337                 task_non_contending(p);
2338 
2339         if (!task_on_rq_queued(p)) {
2340                 /*
2341                  * Inactive timer is armed. However, p is leaving DEADLINE and
2342                  * might migrate away from this rq while continuing to run on
2343                  * some other class. We need to remove its contribution from
2344                  * this rq running_bw now, or sub_rq_bw (below) will complain.
2345                  */
2346                 if (p->dl.dl_non_contending)
2347                         sub_running_bw(&p->dl, &rq->dl);
2348                 sub_rq_bw(&p->dl, &rq->dl);
2349         }
2350 
2351         /*
2352          * We cannot use inactive_task_timer() to invoke sub_running_bw()
2353          * at the 0-lag time, because the task could have been migrated
2354          * while SCHED_OTHER in the meanwhile.
2355          */
2356         if (p->dl.dl_non_contending)
2357                 p->dl.dl_non_contending = 0;
2358 
2359         /*
2360          * Since this might be the only -deadline task on the rq,
2361          * this is the right place to try to pull some other one
2362          * from an overloaded CPU, if any.
2363          */
2364         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2365                 return;
2366 
2367         deadline_queue_pull_task(rq);
2368 }
2369 
2370 /*
2371  * When switching to -deadline, we may overload the rq, then
2372  * we try to push someone off, if possible.
2373  */
2374 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2375 {
2376         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2377                 put_task_struct(p);
2378 
2379         /* If p is not queued, we will update its parameters at next wakeup. */
2380         if (!task_on_rq_queued(p)) {
2381                 add_rq_bw(&p->dl, &rq->dl);
2382 
2383                 return;
2384         }
2385 
2386         if (rq->curr != p) {
2387 #ifdef CONFIG_SMP
2388                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2389                         deadline_queue_push_tasks(rq);
2390 #endif
2391                 if (dl_task(rq->curr))
2392                         check_preempt_curr_dl(rq, p, 0);
2393                 else
2394                         resched_curr(rq);
2395         }
2396 }
2397 
2398 /*
2399  * If the scheduling parameters of a -deadline task changed,
2400  * a push or pull operation might be needed.
2401  */
2402 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2403                             int oldprio)
2404 {
2405         if (task_on_rq_queued(p) || rq->curr == p) {
2406 #ifdef CONFIG_SMP
2407                 /*
2408                  * This might be too much, but unfortunately
2409                  * we don't have the old deadline value, and
2410                  * we can't tell whether the task increased
2411                  * or lowered its priority, so pull unconditionally
2412                  * unless the rq is overloaded.
2413                  */
2413                 if (!rq->dl.overloaded)
2414                         deadline_queue_pull_task(rq);
2415 
2416                 /*
2417                  * If we now have an earlier deadline task than p,
2418                  * then reschedule, provided p is still on this
2419                  * runqueue.
2420                  */
2421                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2422                         resched_curr(rq);
2423 #else
2424                 /*
2425                  * Again, we don't know if p has an earlier
2426                  * or later deadline, so let's blindly set a
2427                  * (maybe not needed) rescheduling point.
2428                  */
2429                 resched_curr(rq);
2430 #endif /* CONFIG_SMP */
2431         }
2432 }
2433 
2434 const struct sched_class dl_sched_class = {
2435         .next                   = &rt_sched_class,
2436         .enqueue_task           = enqueue_task_dl,
2437         .dequeue_task           = dequeue_task_dl,
2438         .yield_task             = yield_task_dl,
2439 
2440         .check_preempt_curr     = check_preempt_curr_dl,
2441 
2442         .pick_next_task         = pick_next_task_dl,
2443         .put_prev_task          = put_prev_task_dl,
2444         .set_next_task          = set_next_task_dl,
2445 
2446 #ifdef CONFIG_SMP
2447         .balance                = balance_dl,
2448         .select_task_rq         = select_task_rq_dl,
2449         .migrate_task_rq        = migrate_task_rq_dl,
2450         .set_cpus_allowed       = set_cpus_allowed_dl,
2451         .rq_online              = rq_online_dl,
2452         .rq_offline             = rq_offline_dl,
2453         .task_woken             = task_woken_dl,
2454 #endif
2455 
2456         .task_tick              = task_tick_dl,
2457         .task_fork              = task_fork_dl,
2458 
2459         .prio_changed           = prio_changed_dl,
2460         .switched_from          = switched_from_dl,
2461         .switched_to            = switched_to_dl,
2462 
2463         .update_curr            = update_curr_dl,
2464 };
2465 
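     /*
      * Check that the new global bandwidth (derived from the RT period and
      * runtime) is not smaller than what is already allocated in any
      * root_domain; return -EBUSY if it is.
      */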
2466 int sched_dl_global_validate(void)
2467 {
2468         u64 runtime = global_rt_runtime();
2469         u64 period = global_rt_period();
2470         u64 new_bw = to_ratio(period, runtime);
2471         struct dl_bw *dl_b;
2472         int cpu, ret = 0;
2473         unsigned long flags;
2474 
2475         /*
2476          * Here we want to check the bandwidth not being set to some
2477          * value smaller than the currently allocated bandwidth in
2478          * any of the root_domains.
2479          *
2480          * FIXME: Cycling on all the CPUs is overdoing, but simpler than
2481          * cycling on root_domains... Discussion on different/better
2482          * solutions is welcome!
2483          */
2484         for_each_possible_cpu(cpu) {
2485                 rcu_read_lock_sched();
2486                 dl_b = dl_bw_of(cpu);
2487 
2488                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2489                 if (new_bw < dl_b->total_bw)
2490                         ret = -EBUSY;
2491                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2492 
2493                 rcu_read_unlock_sched();
2494 
2495                 if (ret)
2496                         break;
2497         }
2498 
2499         return ret;
2500 }
2501 
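     /*
      * Refresh the per-runqueue parameters used by GRUB reclaiming
      * (bw_ratio and extra_bw) from the global RT period and runtime.
      */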
2502 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2503 {
2504         if (global_rt_runtime() == RUNTIME_INF) {
2505                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2506                 dl_rq->extra_bw = 1 << BW_SHIFT;
2507         } else {
2508                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2509                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2510                 dl_rq->extra_bw = to_ratio(global_rt_period(),
2511                                                     global_rt_runtime());
2512         }
2513 }
2514 
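     /*
      * Propagate a change of the global RT bandwidth to every root_domain's
      * dl_bw and refresh the per-runqueue GRUB ratios.
      */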
2515 void sched_dl_do_global(void)
2516 {
2517         u64 new_bw = -1;
2518         struct dl_bw *dl_b;
2519         int cpu;
2520         unsigned long flags;
2521 
2522         def_dl_bandwidth.dl_period = global_rt_period();
2523         def_dl_bandwidth.dl_runtime = global_rt_runtime();
2524 
2525         if (global_rt_runtime() != RUNTIME_INF)
2526                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2527 
2528         /*
2529          * FIXME: As above, cycling on all CPUs rather than on root_domains.
2530          */
2531         for_each_possible_cpu(cpu) {
2532                 rcu_read_lock_sched();
2533                 dl_b = dl_bw_of(cpu);
2534 
2535                 raw_spin_lock_irqsave(&dl_b->lock, flags);
2536                 dl_b->bw = new_bw;
2537                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2538 
2539                 rcu_read_unlock_sched();
2540                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2541         }
2542 }
2543 
2544 /*
2545  * We must be sure that accepting a new task (or allowing changing the
2546  * parameters of an existing one) is consistent with the bandwidth
2547  * constraints. If yes, this function also accordingly updates the currently
2548  * allocated bandwidth to reflect the new situation.
2549  *
2550  * This function is called while holding p's rq->lock.
2551  */
2552 int sched_dl_overflow(struct task_struct *p, int policy,
2553                       const struct sched_attr *attr)
2554 {
2555         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2556         u64 period = attr->sched_period ?: attr->sched_deadline;
2557         u64 runtime = attr->sched_runtime;
2558         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2559         int cpus, err = -1;
2560 
2561         if (attr->sched_flags & SCHED_FLAG_SUGOV)
2562                 return 0;
2563 
2564         /* !deadline task may carry old deadline bandwidth */
2565         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2566                 return 0;
2567 
2568         /*
2569          * Either if a task enters, leaves, or stays -deadline but changes
2570          * its parameters, we may need to update accordingly the total
2571          * allocated bandwidth of the root_domain.
2572          */
2573         raw_spin_lock(&dl_b->lock);
2574         cpus = dl_bw_cpus(task_cpu(p));
2575         if (dl_policy(policy) && !task_has_dl_policy(p) &&
2576             !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2577                 if (hrtimer_active(&p->dl.inactive_timer))
2578                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
2579                 __dl_add(dl_b, new_bw, cpus);
2580                 err = 0;
2581         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2582                    !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2583                 /*
2584                  * XXX this is slightly incorrect: when the task
2585                  * utilization decreases, we should delay the total
2586                  * utilization change until the task's 0-lag point.
2587                  * But this would require setting the task's "inactive
2588                  * timer" when the task is not inactive.
2589                  */
2590                 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2591                 __dl_add(dl_b, new_bw, cpus);
2592                 dl_change_utilization(p, new_bw);
2593                 err = 0;
2594         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2595                 /*
2596                  * Do not decrease the total deadline utilization here,
2597                  * switched_from_dl() will take care to do it at the correct
2598                  * (0-lag) time.
2599                  */
2600                 err = 0;
2601         }
2602         raw_spin_unlock(&dl_b->lock);
2603 
2604         return err;
2605 }
2606 
2607 /*
2608  * This function initializes the sched_dl_entity of a newly becoming
2609  * SCHED_DEADLINE task.
2610  *
2611  * Only the static values are considered here, the actual runtime and the
2612  * absolute deadline will be properly calculated when the task is enqueued
2613  * for the first time with its new policy.
2614  */
2615 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2616 {
2617         struct sched_dl_entity *dl_se = &p->dl;
2618 
2619         dl_se->dl_runtime = attr->sched_runtime;
2620         dl_se->dl_deadline = attr->sched_deadline;
2621         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2622         dl_se->flags = attr->sched_flags;
2623         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2624         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2625 }
2626 
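     /* Report p's current -deadline parameters back through @attr. */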
2627 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2628 {
2629         struct sched_dl_entity *dl_se = &p->dl;
2630 
2631         attr->sched_priority = p->rt_priority;
2632         attr->sched_runtime = dl_se->dl_runtime;
2633         attr->sched_deadline = dl_se->dl_deadline;
2634         attr->sched_period = dl_se->dl_period;
2635         attr->sched_flags = dl_se->flags;
2636 }
2637 
2638 /*
2639  * This function validates the new parameters of a -deadline task.
2640  * We ask for the deadline not being zero, and no smaller than the
2641  * runtime, as well as the period being either zero or no smaller
2642  * than the deadline. Furthermore, we have to be sure that user
2643  * parameters are above the internal resolution of 1us (we check
2644  * sched_runtime only since it is always the smaller one) and
2645  * below 2^63 ns (we have to check both sched_deadline and
2646  * sched_period, as the latter can be zero).
2647  */
2648 bool __checkparam_dl(const struct sched_attr *attr)
2649 {
2650         /* special dl tasks don't actually use any parameter */
2651         if (attr->sched_flags & SCHED_FLAG_SUGOV)
2652                 return true;
2653 
2654         /* deadline != 0 */
2655         if (attr->sched_deadline == 0)
2656                 return false;
2657 
2658         /*
2659          * Since we truncate DL_SCALE bits, make sure we're at least
2660          * that big.
2661          */
2662         if (attr->sched_runtime < (1ULL << DL_SCALE))
2663                 return false;
2664 
2665         /*
2666          * Deadline and period are compared as signed 64-bit times
2667          * (see dl_time_before()), so reject values with bit 63 set.
2668          */
2669         if (attr->sched_deadline & (1ULL << 63) ||
2670             attr->sched_period & (1ULL << 63))
2671                 return false;
2672 
2673         /* runtime <= deadline <= period (when period != 0) */
2674         if ((attr->sched_period != 0 &&
2675              attr->sched_period < attr->sched_deadline) ||
2676             attr->sched_deadline < attr->sched_runtime)
2677                 return false;
2678 
2679         return true;
2680 }
2681 
2682 /*
2683  * This function clears the sched_dl_entity static params.
2684  */
2685 void __dl_clear_params(struct task_struct *p)
2686 {
2687         struct sched_dl_entity *dl_se = &p->dl;
2688 
2689         dl_se->dl_runtime               = 0;
2690         dl_se->dl_deadline              = 0;
2691         dl_se->dl_period                = 0;
2692         dl_se->flags                    = 0;
2693         dl_se->dl_bw                    = 0;
2694         dl_se->dl_density               = 0;
2695 
2696         dl_se->dl_throttled             = 0;
2697         dl_se->dl_yielded               = 0;
2698         dl_se->dl_non_contending        = 0;
2699         dl_se->dl_overrun               = 0;
2700 }
2701 
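     /* Return true if @attr carries -deadline parameters different from p's current ones. */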
2702 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2703 {
2704         struct sched_dl_entity *dl_se = &p->dl;
2705 
2706         if (dl_se->dl_runtime != attr->sched_runtime ||
2707             dl_se->dl_deadline != attr->sched_deadline ||
2708             dl_se->dl_period != attr->sched_period ||
2709             dl_se->flags != attr->sched_flags)
2710                 return true;
2711 
2712         return false;
2713 }
2714 
2715 #ifdef CONFIG_SMP
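     /*
      * Check whether p's bandwidth fits in the root_domain covering
      * @cs_cpus_allowed and, if so, reserve it there; the source
      * root_domain is released later by set_cpus_allowed_dl().
      */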
2716 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2717 {
2718         unsigned int dest_cpu;
2719         struct dl_bw *dl_b;
2720         bool overflow;
2721         int cpus, ret;
2722         unsigned long flags;
2723 
2724         dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2725 
2726         rcu_read_lock_sched();
2727         dl_b = dl_bw_of(dest_cpu);
2728         raw_spin_lock_irqsave(&dl_b->lock, flags);
2729         cpus = dl_bw_cpus(dest_cpu);
2730         overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2731         if (overflow) {
2732                 ret = -EBUSY;
2733         } else {
2734                 /*
2735                  * We reserve space for this task in the destination
2736                  * root_domain, as we can't fail after this point.
2737                  * We will free resources in the source root_domain
2738                  * later on (see set_cpus_allowed_dl()).
2739                  */
2740                 __dl_add(dl_b, p->dl.dl_bw, cpus);
2741                 ret = 0;
2742         }
2743         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2744         rcu_read_unlock_sched();
2745 
2746         return ret;
2747 }
2748 
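     /*
      * Return 1 if the -deadline bandwidth currently allocated on @cur would
      * still fit on the CPUs in @trial, 0 otherwise.
      */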
2749 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2750                                  const struct cpumask *trial)
2751 {
2752         int ret = 1, trial_cpus;
2753         struct dl_bw *cur_dl_b;
2754         unsigned long flags;
2755 
2756         rcu_read_lock_sched();
2757         cur_dl_b = dl_bw_of(cpumask_any(cur));
2758         trial_cpus = cpumask_weight(trial);
2759 
2760         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2761         if (cur_dl_b->bw != -1 &&
2762             cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2763                 ret = 0;
2764         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2765         rcu_read_unlock_sched();
2766 
2767         return ret;
2768 }
2769 
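     /*
      * Return true if the -deadline bandwidth already allocated in @cpu's
      * root_domain exceeds what its active CPUs may host under the global
      * bandwidth limit.
      */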
2770 bool dl_cpu_busy(unsigned int cpu)
2771 {
2772         unsigned long flags;
2773         struct dl_bw *dl_b;
2774         bool overflow;
2775         int cpus;
2776 
2777         rcu_read_lock_sched();
2778         dl_b = dl_bw_of(cpu);
2779         raw_spin_lock_irqsave(&dl_b->lock, flags);
2780         cpus = dl_bw_cpus(cpu);
2781         overflow = __dl_overflow(dl_b, cpus, 0, 0);
2782         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2783         rcu_read_unlock_sched();
2784 
2785         return overflow;
2786 }
2787 #endif /* CONFIG_SMP */
2788 
2789 #ifdef CONFIG_SCHED_DEBUG
2790 void print_dl_stats(struct seq_file *m, int cpu)
2791 {
2792         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2793 }
2794 #endif /* CONFIG_SCHED_DEBUG */