/*
 * coupled.c - helper functions to enter the same idle state on multiple cpus
 *
 * Copyright (c) 2011 Google, Inc.
 *
 * Author: Colin Cross <ccross@android.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "cpuidle.h"

/**
 * DOC: Coupled cpuidle states
 *
 * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
 * cpus cannot be independently powered down, either due to
 * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
 * power down), or due to HW bugs (on OMAP4460, a cpu powering up
 * will corrupt the gic state unless the other cpu runs a work
 * around).  Each cpu has a power state that it can enter without
 * coordinating with the other cpu (usually Wait For Interrupt, or
 * WFI), and one or more "coupled" power states that affect blocks
 * shared between the cpus (L2 cache, interrupt controller, and
 * sometimes the whole SoC).  Entering a coupled power state must
 * be tightly controlled on both cpus.
 *
 * This file implements a solution, where each cpu will wait in the
 * WFI state until all cpus are ready to enter a coupled state, at
 * which point the coupled state function will be called on all
 * cpus at approximately the same time.
 *
 * Once all cpus are ready to enter idle, they are woken by an smp
 * cross call.  At this point, there is a chance that one of the
 * cpus will find work to do, and choose not to enter idle.  A
 * final pass is needed to guarantee that all cpus will call the
 * power state enter function at the same time.  During this pass,
 * each cpu will increment the ready counter, and continue once the
 * ready counter matches the number of online coupled cpus.  If any
 * cpu exits idle, the other cpus will decrement their counter and
 * retry.
 *
 * requested_state stores the deepest coupled idle state each cpu
 * is ready for.  It is assumed that the states are indexed from
 * shallowest (highest power, lowest exit latency) to deepest
 * (lowest power, highest exit latency).  The requested_state
 * variable is not locked.  It is only written from the cpu that
 * it stores (or by the on/offlining cpu if that cpu is offline),
 * and only read after all the cpus are ready for the coupled idle
 * state and are no longer updating it.
 *
 * Three counters are used.  online_count tracks the number of cpus
 * in the coupled set that are currently or soon will be online.
 * The waiting count tracks the number of cpus that are in the
 * waiting loop, in the ready loop, or in the coupled idle state.
 * The ready count tracks the number of cpus that are in the ready
 * loop or in the coupled idle state.  The waiting and ready counts
 * are packed into the single atomic ready_waiting_counts.
 *
 * To use coupled cpuidle states, a cpuidle driver must:
 *
 *    Set struct cpuidle_device.coupled_cpus to the mask of all
 *    coupled cpus, usually the same as cpu_possible_mask if all cpus
 *    are part of the same cluster.  The coupled_cpus mask must be
 *    set in the struct cpuidle_device for each cpu.
 *
 *    Set struct cpuidle_driver.safe_state_index to the index of a
 *    state that is not a coupled state.  This is usually WFI.
 *
 *    Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
 *    state that affects multiple cpus.
 *
 *    Provide a struct cpuidle_state.enter function for each state
 *    that affects multiple cpus.  This function is guaranteed to be
 *    called on all cpus at approximately the same time.  The driver
 *    should ensure that the cpus all abort together if any cpu tries
 *    to abort once the function is called.  The function should return
 *    with interrupts still disabled.
 *
 * An illustrative sketch of this setup is given in the example comment
 * below the declarations that follow.
 */

/**
 * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
 * @coupled_cpus: mask of cpus that are part of the coupled set
 * @requested_state: array of requested states for cpus in the coupled set
 * @ready_waiting_counts: combined count of cpus in ready or waiting loops
 * @abort_barrier: synchronisation point for abort cases
 * @online_count: count of cpus that are online
 * @refcnt: reference count of cpuidle devices that are using this struct
 * @prevent: flag to prevent coupled idle while a cpu is hotplugging
 */
struct cpuidle_coupled {
	cpumask_t coupled_cpus;
	int requested_state[NR_CPUS];
	atomic_t ready_waiting_counts;
	atomic_t abort_barrier;
	int online_count;
	int refcnt;
	int prevent;
};

#define WAITING_BITS 16
#define MAX_WAITING_CPUS (1 << WAITING_BITS)
#define WAITING_MASK (MAX_WAITING_CPUS - 1)
#define READY_MASK (~WAITING_MASK)

#define CPUIDLE_COUPLED_NOT_IDLE	(-1)

static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);

/*
 * The cpuidle_coupled_poke_pending mask is used to avoid calling
 * smp_call_function_single_async() with the per cpu call_single_data struct
 * already in use.  This prevents a deadlock where two cpus are waiting for
 * each other's call_single_data struct to be available.
 */
static cpumask_t cpuidle_coupled_poke_pending;

/*
 * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
 * once to minimize entering the ready loop with a poke pending, which would
 * require aborting and retrying.
 */
static cpumask_t cpuidle_coupled_poked;
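
/*
 * Example: a minimal sketch of the driver-side setup described in the DOC
 * comment above.  Everything prefixed with "foo_" (the driver, the per-cpu
 * device, and the enter callbacks) is hypothetical and only meant to show
 * where coupled_cpus, safe_state_index and CPUIDLE_FLAG_COUPLED fit; a real
 * driver will have its own states, latencies and registration error handling.
 *
 *	static DEFINE_PER_CPU(struct cpuidle_device, foo_cpuidle_device);
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name			= "foo_idle",
 *		.owner			= THIS_MODULE,
 *		.states[0] = {
 *			.enter			= foo_enter_wfi,
 *			.exit_latency		= 1,
 *			.target_residency	= 1,
 *			.name			= "WFI",
 *			.desc			= "ARM WFI",
 *		},
 *		.states[1] = {
 *			.enter			= foo_enter_coupled,
 *			.exit_latency		= 5000,
 *			.target_residency	= 10000,
 *			.flags			= CPUIDLE_FLAG_COUPLED,
 *			.name			= "C2",
 *			.desc			= "SoC power down",
 *		},
 *		.state_count		= 2,
 *		.safe_state_index	= 0,
 *	};
 *
 *	static int __init foo_cpuidle_init(void)
 *	{
 *		int cpu, ret;
 *		struct cpuidle_device *dev;
 *
 *		ret = cpuidle_register_driver(&foo_idle_driver);
 *		if (ret)
 *			return ret;
 *
 *		for_each_possible_cpu(cpu) {
 *			dev = &per_cpu(foo_cpuidle_device, cpu);
 *			dev->cpu = cpu;
 *			cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
 *			ret = cpuidle_register_device(dev);
 *			if (ret)
 *				return ret;
 *		}
 *		return 0;
 *	}
 */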

/**
 * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
 * @dev: cpuidle_device of the calling cpu
 * @a: atomic variable to hold the barrier
 *
 * No caller to this function will return from this function until all online
 * cpus in the same coupled group have called this function.  Once any caller
 * has returned from this function, the barrier is immediately available for
 * reuse.
 *
 * The atomic variable must be initialized to 0 before any cpu calls
 * this function, and will be reset to 0 before any cpu returns from this
 * function.
 *
 * Must only be called from within a coupled idle state handler
 * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
 *
 * Provides full smp barrier semantics before and after calling.
 */
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
	int n = dev->coupled->online_count;

	smp_mb__before_atomic();
	atomic_inc(a);

	while (atomic_read(a) < n)
		cpu_relax();

	if (atomic_inc_return(a) == n * 2) {
		atomic_set(a, 0);
		return;
	}

	while (atomic_read(a) > n)
		cpu_relax();
}

/**
 * cpuidle_state_is_coupled - check if a state is part of a coupled set
 * @drv: struct cpuidle_driver for the platform
 * @state: index of the target state in drv->states
 *
 * Returns true if the target state is coupled with cpus besides this one
 */
bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
{
	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
}

/**
 * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
 * @drv: struct cpuidle_driver for the platform
 *
 * Returns 0 for valid state values, a negative error code otherwise:
 *  * -EINVAL if any coupled state (safe_state_index) is wrongly set.
 */
int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
{
	int i;

	for (i = drv->state_count - 1; i >= 0; i--) {
		if (cpuidle_state_is_coupled(drv, i) &&
		    (drv->safe_state_index == i ||
		     drv->safe_state_index < 0 ||
		     drv->safe_state_index >= drv->state_count))
			return -EINVAL;
	}

	return 0;
}

/**
 * cpuidle_coupled_set_ready - mark a cpu as ready
 * @coupled: the struct coupled that contains the current cpu
 */
static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
{
	atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}

/**
 * cpuidle_coupled_set_not_ready - mark a cpu as not ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Decrements the ready counter, unless the ready (and thus the waiting) counter
 * is equal to the number of online cpus.  Prevents a race where one cpu
 * decrements the waiting counter and then re-increments it just before another
 * cpu has decremented its ready counter, leading to the ready counter going
 * down from the number of online cpus without going through the coupled idle
 * state.
 *
 * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
 * counter was equal to the number of online cpus.
 */
static
inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
{
	int all;
	int ret;

	all = coupled->online_count | (coupled->online_count << WAITING_BITS);
	ret = atomic_add_unless(&coupled->ready_waiting_counts,
				-MAX_WAITING_CPUS, all);

	return ret ? 0 : -EINVAL;
}
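
/*
 * Example: a hypothetical coupled state .enter handler showing how a driver
 * can use cpuidle_coupled_parallel_barrier() to make all cpus abort together,
 * as required by the DOC comment above.  foo_cpu_can_power_down(), foo_wfi()
 * and foo_cpu_powerdown() are made-up platform hooks; only the barrier calls
 * and the "return with interrupts still disabled" contract are real.
 *
 *	static atomic_t foo_abort_barrier;
 *	static atomic_t foo_abort_flag;
 *
 *	static int foo_enter_coupled(struct cpuidle_device *dev,
 *				     struct cpuidle_driver *drv, int index)
 *	{
 *		bool aborted = false;
 *
 *		if (!foo_cpu_can_power_down(dev->cpu)) {
 *			atomic_inc(&foo_abort_flag);
 *			aborted = true;
 *		}
 *
 * First barrier: every online coupled cpu has voted before any cpu reads the
 * result, so all of them take the same branch:
 *
 *		cpuidle_coupled_parallel_barrier(dev, &foo_abort_barrier);
 *
 *		if (atomic_read(&foo_abort_flag))
 *			foo_wfi();
 *		else
 *			foo_cpu_powerdown(dev->cpu);
 *
 * Second barrier: wait until every cpu has finished reading the flag, then
 * each voter removes only its own vote.  A later coupled attempt cannot read
 * the flag before this cpu has returned, so no stale votes are observed:
 *
 *		cpuidle_coupled_parallel_barrier(dev, &foo_abort_barrier);
 *		if (aborted)
 *			atomic_dec(&foo_abort_flag);
 *
 *		return index;
 *	}
 */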

/**
 * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the ready loop.
 */
static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == 0;
}

/**
 * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the ready loop
 */
static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
{
	int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
	return r == coupled->online_count;
}

/**
 * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all cpus coupled to this target state are in the wait loop
 */
static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == coupled->online_count;
}

/**
 * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns true if all of the cpus in a coupled set are out of the waiting loop.
 */
static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
{
	int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
	return w == 0;
}

/**
 * cpuidle_coupled_get_state - determine the deepest idle state
 * @dev: struct cpuidle_device for this cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Returns the deepest idle state that all coupled cpus can enter
 */
static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
		struct cpuidle_coupled *coupled)
{
	int i;
	int state = INT_MAX;

	/*
	 * Read barrier ensures that read of requested_state is ordered after
	 * reads of ready_count.  Matches the write barrier in
	 * cpuidle_coupled_set_waiting().
	 */
	smp_rmb();

	for_each_cpu(i, &coupled->coupled_cpus)
		if (cpu_online(i) && coupled->requested_state[i] < state)
			state = coupled->requested_state[i];

	return state;
}

static void cpuidle_coupled_handle_poke(void *info)
{
	int cpu = (unsigned long)info;
	cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
	cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}

/**
 * cpuidle_coupled_poke - wake up a cpu that may be waiting
 * @cpu: target cpu
 *
 * Ensures that the target cpu exits its waiting idle state (if it is in it)
 * and will see updates to waiting_count before it re-enters its waiting idle
 * state.
 *
 * If cpuidle_coupled_poke_pending is already set for the target cpu, that cpu
 * either has or will soon have a pending IPI that will wake it out of idle,
 * or it is currently processing the IPI and is not in idle.
 */
static void cpuidle_coupled_poke(int cpu)
{
	struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

	if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
		smp_call_function_single_async(cpu, csd);
}

/**
 * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
 * @this_cpu: the cpu calling this function
 * @coupled: the struct coupled that contains the current cpu
 *
 * Calls cpuidle_coupled_poke on all other online cpus.
 */
static void cpuidle_coupled_poke_others(int this_cpu,
		struct cpuidle_coupled *coupled)
{
	int cpu;

	for_each_cpu(cpu, &coupled->coupled_cpus)
		if (cpu != this_cpu && cpu_online(cpu))
			cpuidle_coupled_poke(cpu);
}

/**
 * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 * @next_state: the index in drv->states of the requested state for this cpu
 *
 * Updates the requested idle state for the specified cpuidle device.
 * Returns the number of waiting cpus.
 */
static int cpuidle_coupled_set_waiting(int cpu,
		struct cpuidle_coupled *coupled, int next_state)
{
	coupled->requested_state[cpu] = next_state;

	/*
	 * The atomic_inc_return provides a write barrier to order the write
	 * to requested_state with the later write that increments ready_count.
	 */
	return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}

/**
 * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Removes the requested idle state for the specified cpuidle device.
 */
static void cpuidle_coupled_set_not_waiting(int cpu,
		struct cpuidle_coupled *coupled)
{
	/*
	 * Decrementing waiting count can race with incrementing it in
	 * cpuidle_coupled_set_waiting, but that's OK.  Worst case, some
	 * cpus will increment ready_count and then spin until they
	 * notice that this cpu has cleared its requested_state.
	 */
	atomic_dec(&coupled->ready_waiting_counts);

	coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
}

/**
 * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
 * @cpu: the current cpu
 * @coupled: the struct coupled that contains the current cpu
 *
 * Marks this cpu as no longer in the ready and waiting loops.  Decrements
 * the waiting count first to prevent another cpu looping back in and seeing
 * this cpu as waiting just before it exits idle.
 */
static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
{
	cpuidle_coupled_set_not_waiting(cpu, coupled);
	atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
}
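
/*
 * Worked example of the ready_waiting_counts encoding, assuming a coupled
 * set of 4 online cpus.  The ready count lives in bits [31:16] and the
 * waiting count in bits [15:0], so both can be moved with a single atomic
 * operation by the helpers above:
 *
 *	0x00000000   no cpus waiting or ready
 *	0x00000003   three cpus have called cpuidle_coupled_set_waiting
 *	0x00000004   all 4 cpus waiting; cpuidle_coupled_cpus_waiting() is true
 *	0x00010004   one cpu called cpuidle_coupled_set_ready (adds 0x10000)
 *	0x00040004   all 4 cpus ready; this is also the "all" value that
 *	             cpuidle_coupled_set_not_ready refuses to decrement from
 *	0x00030003   one cpu finished the coupled state and called
 *	             cpuidle_coupled_set_done (subtracts 1, then 0x10000)
 */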

/**
 * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
 * @cpu: this cpu
 *
 * Turns on interrupts and spins until any outstanding poke interrupts have
 * been processed and the poke bit has been cleared.
 *
 * Other interrupts may also be processed while interrupts are enabled, so
 * need_resched() must be tested after this function returns to make sure
 * the interrupt didn't schedule work that should take the cpu out of idle.
 *
 * Returns 0 if no poke was pending, 1 if a poke was cleared.
 */
static int cpuidle_coupled_clear_pokes(int cpu)
{
	if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		return 0;

	local_irq_enable();
	while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
		cpu_relax();
	local_irq_disable();

	return 1;
}

static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	int ret;

	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);

	return ret;
}

/**
 * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
 * @dev: struct cpuidle_device for the current cpu
 * @drv: struct cpuidle_driver for the platform
 * @next_state: index of the requested state in drv->states
 *
 * Coordinate with coupled cpus to enter the target state.  This is a two
 * stage process.  In the first stage, the cpus are operating independently,
 * and may call into cpuidle_enter_state_coupled at completely different times.
 * To save as much power as possible, the first cpus to call this function will
 * go to an intermediate state (the driver's safe state), and wait for all the
 * other cpus to call this function.  Once all coupled cpus are idle, the
 * second stage will start.  Each coupled cpu will spin until all cpus have
 * guaranteed that they will enter the target state.
 *
 * This function must be called with interrupts disabled.  It may enable
 * interrupts while preparing for idle, and it will always return with
 * interrupts enabled.
 */
int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int next_state)
{
	int entered_state = -1;
	struct cpuidle_coupled *coupled = dev->coupled;
	int w;

	if (!coupled)
		return -EINVAL;

	while (coupled->prevent) {
		cpuidle_coupled_clear_pokes(dev->cpu);
		if (need_resched()) {
			local_irq_enable();
			return entered_state;
		}
		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	/* Read barrier ensures online_count is read after prevent is cleared */
	smp_rmb();

reset:
	cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);

	w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
	/*
	 * If this is the last cpu to enter the waiting state, poke
	 * all the other cpus out of their waiting state so they can
	 * enter a deeper state.  This can race with one of the cpus
	 * exiting the waiting state due to an interrupt and
	 * decrementing waiting_count, see comment below.
	 */
	if (w == coupled->online_count) {
		cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
		cpuidle_coupled_poke_others(dev->cpu, coupled);
	}

retry:
	/*
	 * Wait for all coupled cpus to be idle, using the deepest state
	 * allowed for a single cpu.  If this was not the poking cpu, wait
	 * for at least one poke before leaving to avoid a race where
	 * two cpus could arrive at the waiting loop at the same time,
	 * but the first of the two to arrive could skip the loop without
	 * processing the pokes from the last to arrive.
	 */
	while (!cpuidle_coupled_cpus_waiting(coupled) ||
	       !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
		if (cpuidle_coupled_clear_pokes(dev->cpu))
			continue;

		if (need_resched()) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		if (coupled->prevent) {
			cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
			goto out;
		}

		entered_state = cpuidle_enter_state(dev, drv,
			drv->safe_state_index);
		local_irq_disable();
	}

	cpuidle_coupled_clear_pokes(dev->cpu);
	if (need_resched()) {
		cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
		goto out;
	}

	/*
	 * Make sure final poke status for this cpu is visible before setting
	 * cpu as ready.
	 */
	smp_wmb();

	/*
	 * All coupled cpus are probably idle.  There is a small chance that
	 * one of the other cpus just became active.  Increment the ready count,
	 * and spin until all coupled cpus have incremented the counter.  Once a
	 * cpu has incremented the ready counter, it cannot abort idle and must
	 * spin until either all cpus have incremented the ready counter, or
	 * another cpu leaves idle and decrements the waiting counter.
	 */

	cpuidle_coupled_set_ready(coupled);
	while (!cpuidle_coupled_cpus_ready(coupled)) {
		/* Check if any other cpus bailed out of idle. */
		if (!cpuidle_coupled_cpus_waiting(coupled))
			if (!cpuidle_coupled_set_not_ready(coupled))
				goto retry;

		cpu_relax();
	}

	/*
	 * Make sure read of all cpus ready is done before reading pending pokes
	 */
	smp_rmb();

	/*
	 * There is a small chance that a cpu left and reentered idle after this
	 * cpu saw that all cpus were waiting.  The cpu that reentered idle will
	 * have sent this cpu a poke, which will still be pending after the
	 * ready loop.  The pending interrupt may be lost by the interrupt
	 * controller when entering the deep idle state.  It's not possible to
	 * clear a pending interrupt without turning interrupts on and handling
	 * it, and it's too late to turn on interrupts here, so reset the
	 * coupled idle state of all cpus and retry.
	 */
	if (cpuidle_coupled_any_pokes_pending(coupled)) {
		cpuidle_coupled_set_done(dev->cpu, coupled);
		/* Wait for all cpus to see the pending pokes */
		cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
		goto reset;
	}

	/* all cpus have acked the coupled state */
	next_state = cpuidle_coupled_get_state(dev, coupled);

	entered_state = cpuidle_enter_state(dev, drv, next_state);

	cpuidle_coupled_set_done(dev->cpu, coupled);

out:
	/*
	 * Normal cpuidle states are expected to return with irqs enabled.
	 * That leads to an inefficiency where a cpu receiving an interrupt
	 * that brings it out of idle will process that interrupt before
	 * exiting the idle enter function and decrementing ready_count.  All
	 * other cpus will need to spin waiting for the cpu that is processing
	 * the interrupt.  If the driver returns with interrupts disabled,
	 * all other cpus will loop back into the safe idle state instead of
	 * spinning, saving power.
	 *
	 * Calling local_irq_enable here allows coupled states to return with
	 * interrupts disabled, but won't cause problems for drivers that
	 * exit with interrupts enabled.
	 */
	local_irq_enable();

	/*
	 * Wait until all coupled cpus have exited idle.  There is no risk that
	 * a cpu exits and re-enters the ready state because this cpu has
	 * already decremented its waiting_count.
	 */
	while (!cpuidle_coupled_no_cpus_ready(coupled))
		cpu_relax();

	return entered_state;
}

static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
{
	cpumask_t cpus;
	cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
	coupled->online_count = cpumask_weight(&cpus);
}

/**
 * cpuidle_coupled_register_device - register a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_register_device to handle coupled idle init.  Finds the
 * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
 * exists yet.
 */
int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
	int cpu;
	struct cpuidle_device *other_dev;
	struct call_single_data *csd;
	struct cpuidle_coupled *coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return 0;

	for_each_cpu(cpu, &dev->coupled_cpus) {
		other_dev = per_cpu(cpuidle_devices, cpu);
		if (other_dev && other_dev->coupled) {
			coupled = other_dev->coupled;
			goto have_coupled;
		}
	}

	/* No existing coupled info found, create a new one */
	coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
	if (!coupled)
		return -ENOMEM;

	coupled->coupled_cpus = dev->coupled_cpus;

have_coupled:
	dev->coupled = coupled;
	if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
		coupled->prevent++;

	cpuidle_coupled_update_online_cpus(coupled);

	coupled->refcnt++;

	csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
	csd->func = cpuidle_coupled_handle_poke;
	csd->info = (void *)(unsigned long)dev->cpu;

	return 0;
}

/**
 * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
 * @dev: struct cpuidle_device for the current cpu
 *
 * Called from cpuidle_unregister_device to tear down coupled idle.  Removes the
 * cpu from the coupled idle set, and frees the struct cpuidle_coupled if this
 * was the last cpu in the set.
 */
void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_coupled *coupled = dev->coupled;

	if (cpumask_empty(&dev->coupled_cpus))
		return;

	if (!--coupled->refcnt)
		kfree(coupled);
	dev->coupled = NULL;
}

/**
 * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Disables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/* Force all cpus out of the waiting loop. */
	coupled->prevent++;
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
	while (!cpuidle_coupled_no_cpus_waiting(coupled))
		cpu_relax();
}

/**
 * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
 * @coupled: the struct coupled that contains the cpu that is changing state
 *
 * Enables coupled cpuidle on a coupled set of cpus.  Used to ensure that
 * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
 */
static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
{
	int cpu = get_cpu();

	/*
	 * Write barrier ensures readers see the new online_count when they
	 * see prevent == 0.
	 */
	smp_wmb();
	coupled->prevent--;
	/* Force cpus out of the prevent loop. */
	cpuidle_coupled_poke_others(cpu, coupled);
	put_cpu();
}

/**
 * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
 * @nb: notifier block
 * @action: hotplug transition
 * @hcpu: target cpu number
 *
 * Called when a cpu is brought on or offline using hotplug.  Updates the
 * coupled cpu set appropriately
 */
static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
		unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
	case CPU_ONLINE:
	case CPU_DEAD:
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		break;
	default:
		return NOTIFY_OK;
	}

	mutex_lock(&cpuidle_lock);

	dev = per_cpu(cpuidle_devices, cpu);
	if (!dev || !dev->coupled)
		goto out;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_PREPARE:
		cpuidle_coupled_prevent_idle(dev->coupled);
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		cpuidle_coupled_update_online_cpus(dev->coupled);
		/* Fall through */
	case CPU_UP_CANCELED:
	case CPU_DOWN_FAILED:
		cpuidle_coupled_allow_idle(dev->coupled);
		break;
	}

out:
	mutex_unlock(&cpuidle_lock);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_coupled_cpu_notifier = {
	.notifier_call = cpuidle_coupled_cpu_notify,
};

static int __init cpuidle_coupled_init(void)
{
	return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
}
core_initcall(cpuidle_coupled_init);