/*
 * kernel/sched/proc.c
 *
 * Kernel load calculations, forked from sched/core.c
 */

#include <linux/export.h>

#include "sched.h"

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *   nr_active = 0;
 *   for_each_possible_cpu(cpu)
 *        nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *   avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * Due to a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of cpus, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So assuming nr_active := 0 when we start out -- true per definition, we
 *    can simply take per-cpu deltas and fold those into a global accumulate
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    cpu to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, it just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
 *    this would add another cross-cpu cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever cpu the task ran
 *    when it went into uninterruptible state and decrement on whatever cpu
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all cpus yields the correct result.
 *
 * This covers the NO_HZ=n code; for extra headaches, see the comment below.
 */

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:	pointer to dest load array
 * @offset:	offset to add
 * @shift:	shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
	loads[0] = (avenrun[0] + offset) << shift;
	loads[1] = (avenrun[1] + offset) << shift;
	loads[2] = (avenrun[2] + offset) << shift;
}

long calc_load_fold_active(struct rq *this_rq)
{
	long nr_active, delta = 0;

	nr_active = this_rq->nr_running;
	nr_active += (long) this_rq->nr_uninterruptible;

	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
	}

	return delta;
}

/*
 * a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	if (active >= load)
		newload += FIXED_1-1;

	return newload / FIXED_1;
}
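
/*
 * Worked example for calc_load() (illustrative only; it assumes the usual
 * fixed-point constants from <linux/sched.h>: FSHIFT = 11, FIXED_1 =
 * 1 << 11 = 2048 and EXP_1 = 1884, i.e. ~2048 * exp(-5s/1min)):
 *
 *   load   = 0			(idle machine, avenrun[0] == 0)
 *   active = 2 * FIXED_1	(two tasks runnable at this sample)
 *
 *   newload = 0 * 1884 + 4096 * (2048 - 1884) = 671744
 *   active >= load, so round up: newload += 2047  -> 673791
 *   result  = 673791 / 2048 = 328		   -> 328/2048 ~= 0.16
 *
 * i.e. a single sample moves the 1-minute average (1 - EXP_1/FIXED_1) ~= 8%
 * of the way towards the instantaneous value; /proc/loadavg would show the
 * result above as roughly "0.16".
 */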
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above described distributed algorithm to compute the global
 * load-average relies on per-cpu sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global idle-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' cpu delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two idle-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old idle contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ idle during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this cpu (effectively using the idle-delta for this cpu which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a cpu having been in NO_HZ idle for multiple
 *    LOAD_FREQ intervals.
 *
 * When making the ILB scale, we should try to pull this in as well.
 */
static atomic_long_t calc_load_idle[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
	int idx = calc_load_idx;

	/*
	 * See calc_global_nohz(), if we observe the new index, we also
	 * need to observe the new update time.
	 */
	smp_rmb();

	/*
	 * If the folding window started, make sure we start writing in the
	 * next idle-delta.
	 */
	if (!time_before(jiffies, calc_load_update))
		idx++;

	return idx & 1;
}

static inline int calc_load_read_idx(void)
{
	return calc_load_idx & 1;
}

void calc_load_enter_idle(void)
{
	struct rq *this_rq = this_rq();
	long delta;

	/*
	 * We're going into NO_HZ mode, if there's any pending delta, fold it
	 * into the pending idle delta.
	 */
	delta = calc_load_fold_active(this_rq);
	if (delta) {
		int idx = calc_load_write_idx();
		atomic_long_add(delta, &calc_load_idle[idx]);
	}
}
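
/*
 * Illustrative sequence for the index machinery above (hypothetical
 * timestamps, LOAD_FREQ taken as 5 seconds; not part of the build):
 *
 *   t = 4.9s	cpu A enters NO_HZ with delta +2;
 *		jiffies is before calc_load_update -> writes idx 0
 *   t = 5.0s	the fold window opens
 *   t = 5.1s	cpu B enters NO_HZ with delta +1;
 *		jiffies is past calc_load_update   -> writes idx 1
 *   t = 5.0s + 10 ticks
 *		calc_global_load() folds idx 0 (the +2) into this window's
 *		sample; calc_global_nohz() then flips calc_load_idx so the
 *		+1 is picked up in the next window.
 */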
void calc_load_exit_idle(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * If we're still before the sample window, we're done.
	 */
	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	/*
	 * We woke inside or after the sample window, this means we're already
	 * accounted through the nohz accounting, so skip the entire deal and
	 * sync up for the next window.
	 */
	this_rq->calc_load_update = calc_load_update;
	if (time_before(jiffies, this_rq->calc_load_update + 10))
		this_rq->calc_load_update += LOAD_FREQ;
}

static long calc_load_fold_idle(void)
{
	int idx = calc_load_read_idx();
	long delta = 0;

	if (atomic_long_read(&calc_load_idle[idx]))
		delta = atomic_long_xchg(&calc_load_idle[idx], 0);

	return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) {
		for (;;) {
			if (n & 1) {
				result *= x;
				result += 1UL << (frac_bits - 1);
				result >>= frac_bits;
			}
			n >>= 1;
			if (!n)
				break;
			x *= x;
			x += 1UL << (frac_bits - 1);
			x >>= frac_bits;
		}
	}

	return result;
}

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0            1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
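
/*
 * Worked example for calc_load_n() (illustrative; same assumed constants
 * as above: FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884):
 *
 * Suppose the machine idled through three LOAD_FREQ windows with avenrun[0]
 * at 2048 (a displayed load of 1.00) and active == 0.
 *
 *   fixed_power_int(1884, 11, 3):
 *     n = 3 is binary 11, so x^3 is built as x^1 * x^2:
 *     1884^2 >> 11 ~= 1733,  then 1884 * 1733 >> 11 ~= 1594
 *
 *   calc_load_n(2048, 1884, 0, 3)
 *     = calc_load(2048, 1594, 0)
 *     = (2048 * 1594) / 2048 = 1594	-> ~0.78
 *
 * which matches the closed form a0 * e^3 = 1.00 * exp(-15s/1min) ~= 0.78,
 * using only the set bits of n instead of n sequential updates.
 */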
/*
 * NO_HZ can leave us missing all per-cpu ticks calling
 * calc_load_account_active(), but since a NO_HZ idle CPU folds its delta
 * into calc_load_idle[] via calc_load_enter_idle(), all we need to do is
 * fold in the pending idle delta if our idle period crossed a load cycle
 * boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
	long delta, active, n;

	if (!time_before(jiffies, calc_load_update + 10)) {
		/*
		 * Catch-up, fold however many we are behind still
		 */
		delta = jiffies - calc_load_update - 10;
		n = 1 + (delta / LOAD_FREQ);

		active = atomic_long_read(&calc_load_tasks);
		active = active > 0 ? active * FIXED_1 : 0;

		avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
		avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
		avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

		calc_load_update += n * LOAD_FREQ;
	}

	/*
	 * Flip the idle index...
	 *
	 * Make sure we first write the new time then flip the index, so that
	 * calc_load_write_idx() will see the new time when it reads the new
	 * index, this avoids a double flip messing things up.
	 */
	smp_wmb();
	calc_load_idx++;
}
#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_fold_idle(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 */
void calc_global_load(unsigned long ticks)
{
	long active, delta;

	if (time_before(jiffies, calc_load_update + 10))
		return;

	/*
	 * Fold the 'old' idle-delta to include all NO_HZ cpus.
	 */
	delta = calc_load_fold_idle();
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	active = atomic_long_read(&calc_load_tasks);
	active = active > 0 ? active * FIXED_1 : 0;

	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
	avenrun[2] = calc_load(avenrun[2], EXP_15, active);

	calc_load_update += LOAD_FREQ;

	/*
	 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
	 */
	calc_global_nohz();
}
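
/*
 * For reference, the consumer side looks roughly like the following
 * (a sketch modelled on fs/proc/loadavg.c; LOAD_INT()/LOAD_FRAC() are the
 * fixed-point helpers from <linux/sched.h>, and the FIXED_1/200 offset adds
 * ~0.005 so the printed value rounds rather than truncates -- treat the
 * exact call site as illustrative rather than authoritative):
 *
 *	unsigned long avnrun[3];
 *
 *	get_avenrun(avnrun, FIXED_1/200, 0);
 *	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu ...\n",
 *		   LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
 *		   LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
 *		   LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
 */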
/*
 * Called from update_cpu_load_active() to periodically update this CPU's
 * active count.
 */
static void calc_load_account_active(struct rq *this_rq)
{
	long delta;

	if (time_before(jiffies, this_rq->calc_load_update))
		return;

	delta = calc_load_fold_active(this_rq);
	if (delta)
		atomic_long_add(delta, &calc_load_tasks);

	this_rq->calc_load_update += LOAD_FREQ;
}

/*
 * End of global load-average stuff
 */

/*
 * The exact cpuload at various idx values, calculated at every tick would be
 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
 *
 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
 * on the nth tick when the cpu may be busy, then we have:
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
 *
 * decay_load_missed() below does an efficient calculation of
 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
 * avoiding a 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
 *
 * The calculation is approximated on a 128 point scale.
 * degrade_zero_ticks is the number of ticks after which load at any
 * particular idx is approximated to be zero.
 * degrade_factor is a precomputed table, a row for each load idx.
 * Each column corresponds to the degradation factor for a power of two ticks,
 * based on the 128 point scale.
 * Example:
 * row 2, col 3 (=12) says that the degradation at load idx 2 after
 * 8 ticks is 12/128 (which is an approximation of the exact factor 3^8/4^8).
 *
 * With these power-of-2 load factors, we can degrade the load n times
 * by looking at the 1 bits in n and doing as many mult/shifts instead of
 * the n mult/shifts needed by the exact degradation.
 */
#define DEGRADE_SHIFT		7
static const unsigned char
		degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char
		degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
					{0, 0, 0, 0, 0, 0, 0, 0},
					{64, 32, 8, 0, 0, 0, 0, 0},
					{96, 72, 40, 12, 1, 0, 0},
					{112, 98, 75, 43, 15, 1, 0},
					{120, 112, 98, 76, 45, 16, 2} };

/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * happens while the CPU is idle, so we just decay the old load without
 * adding any new load.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;

	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;

	if (idx == 1)
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;

		missed_updates >>= 1;
		j++;
	}
	return load;
}

/*
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
 * every tick. We fix it up based on jiffies.
 */
static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
			      unsigned long pending_updates)
{
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
		old_load = decay_load_missed(old_load, pending_updates - 1, i);
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}

	sched_avg_update(this_rq);
}
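
/*
 * Worked example (illustrative numbers only): a cpu misses 8 ticks and then
 * takes a busy tick, so __update_cpu_load() runs with pending_updates = 9
 * (8 missed ticks plus the current one). For idx = 2 with cpu_load[2] = 1000
 * and an instantaneous load of 512:
 *
 *   decay_load_missed(1000, 8, 2):
 *     8 is binary 1000, so only bit 3 is set ->
 *     1000 * degrade_factor[2][3] >> 7 = 1000 * 12 / 128 = 93
 *     (the exact factor would be (3/4)^8 ~= 0.10, i.e. ~100)
 *
 *   new_load = 512 > 93, so round up: 512 + 3 = 515
 *   cpu_load[2] = (93 * 3 + 515) >> 2 = 198
 *
 * i.e. the stale average is first decayed for the missed ticks and only then
 * pulled a quarter of the way towards the current load.
 */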
#ifdef CONFIG_SMP
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->cfs.runnable_load_avg;
}
#else
static inline unsigned long get_rq_runnable_load(struct rq *rq)
{
	return rq->load.weight;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
/*
 * There is no sane way to deal with nohz on smp when using jiffies because the
 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading,
 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
 *
 * Therefore we cannot use the delta approach from the regular tick since that
 * would seriously skew the load calculation. However we'll make do for those
 * updates happening while idle (nohz_idle_balance) or coming out of idle
 * (tick_nohz_idle_exit).
 *
 * This means we might still be one tick off for nohz periods.
 */

/*
 * Called from nohz_idle_balance() to update the load ratings before doing the
 * idle balance.
 */
void update_idle_cpu_load(struct rq *this_rq)
{
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long load = get_rq_runnable_load(this_rq);
	unsigned long pending_updates;

	/*
	 * Bail if there's load or we're actually up to date.
	 */
	if (load || curr_jiffies == this_rq->last_load_update_tick)
		return;

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	this_rq->last_load_update_tick = curr_jiffies;

	__update_cpu_load(this_rq, load, pending_updates);
}

/*
 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
 */
void update_cpu_load_nohz(void)
{
	struct rq *this_rq = this_rq();
	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
	unsigned long pending_updates;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;

	raw_spin_lock(&this_rq->lock);
	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/*
		 * We were idle, this means load 0, the current load might be
		 * !0 due to remote wakeups and the like.
		 */
		__update_cpu_load(this_rq, 0, pending_updates);
	}
	raw_spin_unlock(&this_rq->lock);
}
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Called from scheduler_tick()
 */
void update_cpu_load_active(struct rq *this_rq)
{
	unsigned long load = get_rq_runnable_load(this_rq);

	/*
	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
	 */
	this_rq->last_load_update_tick = jiffies;
	__update_cpu_load(this_rq, load, 1);

	calc_load_account_active(this_rq);
}
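
/*
 * Illustrative sequence tying the three entry points above together
 * (hypothetical jiffies values; not part of the build):
 *
 *   jiffies = 1000	rq->last_load_update_tick = 1000, the cpu goes idle
 *   jiffies = 1004	nohz_idle_balance() -> update_idle_cpu_load():
 *			pending_updates = 4, load = 0, cpu_load[] decays
 *			towards zero for the missed ticks
 *   jiffies = 1006	wakeup -> tick_nohz_idle_exit() ->
 *			update_cpu_load_nohz(): pending_updates = 2, again
 *			folded with load 0 (remote wakeups may have made
 *			rq->load non-zero, hence the comment above)
 *   jiffies = 1007	scheduler_tick() -> update_cpu_load_active():
 *			back to the regular one-tick update
 */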