root/arch/arm/common/bL_switcher.c


DEFINITIONS

This source file includes the following definitions.
  1. read_mpidr
  2. bL_do_switch
  3. bL_switchpoint
  4. bL_switch_to
  5. bL_switcher_thread
  6. bL_switcher_thread_create
  7. bL_switch_request_cb
  8. bL_switcher_register_notifier
  9. bL_switcher_unregister_notifier
  10. bL_activation_notify
  11. bL_switcher_restore_cpus
  12. bL_switcher_halve_cpus
  13. bL_switcher_get_logical_index
  14. bL_switcher_trace_trigger_cpu
  15. bL_switcher_trace_trigger
  16. bL_switcher_enable
  17. bL_switcher_disable
  18. bL_switcher_active_show
  19. bL_switcher_active_store
  20. bL_switcher_trace_trigger_store
  21. bL_switcher_sysfs_init
  22. bL_switcher_get_enabled
  23. bL_switcher_put_enabled
  24. bL_switcher_cpu_pre
  25. bL_switcher_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
   4  *
   5  * Created by:  Nicolas Pitre, March 2012
   6  * Copyright:   (C) 2012-2013  Linaro Limited
   7  */
   8 
   9 #include <linux/atomic.h>
  10 #include <linux/init.h>
  11 #include <linux/kernel.h>
  12 #include <linux/module.h>
  13 #include <linux/sched/signal.h>
  14 #include <uapi/linux/sched/types.h>
  15 #include <linux/interrupt.h>
  16 #include <linux/cpu_pm.h>
  17 #include <linux/cpu.h>
  18 #include <linux/cpumask.h>
  19 #include <linux/kthread.h>
  20 #include <linux/wait.h>
  21 #include <linux/time.h>
  22 #include <linux/clockchips.h>
  23 #include <linux/hrtimer.h>
  24 #include <linux/tick.h>
  25 #include <linux/notifier.h>
  26 #include <linux/mm.h>
  27 #include <linux/mutex.h>
  28 #include <linux/smp.h>
  29 #include <linux/spinlock.h>
  30 #include <linux/string.h>
  31 #include <linux/sysfs.h>
  32 #include <linux/irqchip/arm-gic.h>
  33 #include <linux/moduleparam.h>
  34 
  35 #include <asm/smp_plat.h>
  36 #include <asm/cputype.h>
  37 #include <asm/suspend.h>
  38 #include <asm/mcpm.h>
  39 #include <asm/bL_switcher.h>
  40 
  41 #define CREATE_TRACE_POINTS
  42 #include <trace/events/power_cpu_migrate.h>
  43 
  44 
  45 /*
  46  * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
  47  * __attribute_const__ and we don't want the compiler to assume any
  48  * constness here as the value _does_ change along some code paths.
  49  */
  50 
  51 static int read_mpidr(void)
  52 {
  53         unsigned int id;
  54         asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
  55         return id & MPIDR_HWID_BITMASK;
  56 }
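
      /*
       * Throughout this file, MPIDR affinity level 0 is the CPU number within
       * its cluster and affinity level 1 is the cluster number, as decoded
       * with MPIDR_AFFINITY_LEVEL() on the dual-cluster systems this driver
       * supports.
       */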
  57 
  58 /*
  59  * bL switcher core code.
  60  */
  61 
  62 static void bL_do_switch(void *_arg)
  63 {
  64         unsigned ib_mpidr, ib_cpu, ib_cluster;
  65         long volatile handshake, **handshake_ptr = _arg;
  66 
  67         pr_debug("%s\n", __func__);
  68 
  69         ib_mpidr = cpu_logical_map(smp_processor_id());
  70         ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
  71         ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
  72 
  73         /* Advertise our handshake location */
  74         if (handshake_ptr) {
  75                 handshake = 0;
  76                 *handshake_ptr = &handshake;
  77         } else
  78                 handshake = -1;
  79 
  80         /*
  81          * Our state has been saved at this point.  Let's release our
  82          * inbound CPU.
  83          */
  84         mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
  85         sev();
  86 
  87         /*
  88          * From this point, we must assume that our counterpart CPU might
  89          * have taken over in its parallel world already, as if execution
  90          * just returned from cpu_suspend().  It is therefore important to
  91          * be very careful not to make any change the other guy is not
  92          * expecting.  This is why we need stack isolation.
  93          *
  94          * Fancy under cover tasks could be performed here.  For now
  95          * we have none.
  96          */
  97 
  98         /*
  99          * Let's wait until our inbound is alive.
 100          */
 101         while (!handshake) {
 102                 wfe();
 103                 smp_mb();
 104         }
 105 
 106         /* Let's put ourself down. */
 107         mcpm_cpu_power_down();
 108 
 109         /* should never get here */
 110         BUG();
 111 }
 112 
 113 /*
 114  * Stack isolation.  To ensure 'current' remains valid, we just use another
 115  * piece of our thread's stack space which should be fairly lightly used.
 116  * The selected area starts just above the thread_info structure located
 117  * at the very bottom of the stack, aligned to a cache line, and indexed
 118  * with the cluster number.
 119  */
 120 #define STACK_SIZE 512
 121 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
 122 static int bL_switchpoint(unsigned long _arg)
 123 {
 124         unsigned int mpidr = read_mpidr();
 125         unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 126         void *stack = current_thread_info() + 1;
 127         stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
 128         stack += clusterid * STACK_SIZE + STACK_SIZE;
 129         call_with_stack(bL_do_switch, (void *)_arg, stack);
 130         BUG();
 131 }
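
      /*
       * Worked example: with 'base' being the cache-aligned address just
       * above the thread_info structure, the outbound CPU enters
       * bL_do_switch() with sp == base + 512 when running in cluster 0 and
       * sp == base + 1024 when running in cluster 1.  Since the stack grows
       * downward, each cluster gets its own 512-byte region and the two
       * worlds never share stack frames.
       */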
 132 
 133 /*
 134  * Generic switcher interface
 135  */
 136 
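      /*
       * bL_gic_id[] records, for each (cpu, cluster) pair, the physical GIC
       * CPU interface ID discovered in bL_switcher_halve_cpus().
       * bL_switcher_cpu_pairing[] maps each logical CPU to the logical CPU it
       * is paired with in the other cluster, or -1 if it has no pairing.
       */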
 137 static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
 138 static int bL_switcher_cpu_pairing[NR_CPUS];
 139 
 140 /*
 141  * bL_switch_to - Switch to a specific cluster for the current CPU
 142  * @new_cluster_id: the ID of the cluster to switch to.
 143  *
 144  * This function must be called on the CPU to be switched.
 145  * Returns 0 on success, else a negative status code.
 146  */
 147 static int bL_switch_to(unsigned int new_cluster_id)
 148 {
 149         unsigned int mpidr, this_cpu, that_cpu;
 150         unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
 151         struct completion inbound_alive;
 152         long volatile *handshake_ptr;
 153         int ipi_nr, ret;
 154 
 155         this_cpu = smp_processor_id();
 156         ob_mpidr = read_mpidr();
 157         ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
 158         ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
 159         BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
 160 
 161         if (new_cluster_id == ob_cluster)
 162                 return 0;
 163 
 164         that_cpu = bL_switcher_cpu_pairing[this_cpu];
 165         ib_mpidr = cpu_logical_map(that_cpu);
 166         ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
 167         ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
 168 
 169         pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
 170                  this_cpu, ob_mpidr, ib_mpidr);
 171 
 172         this_cpu = smp_processor_id();
 173 
 174         /* Close the gate for our entry vectors */
 175         mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
 176         mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
 177 
 178         /* Install our "inbound alive" notifier. */
 179         init_completion(&inbound_alive);
 180         ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
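              /*
               * The poke value is in GICD_SGIR format: the completion SGI
               * number from register_ipi_completion() sits in bits [3:0], and
               * the outbound CPU's GIC interface is selected via the CPU
               * target list field (bits [23:16]).  The MCPM entry code on the
               * inbound CPU writes this value to the SGIR physical address
               * set up below, signalling "inbound alive" back to us.
               */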
 181         ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
 182         mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
 183 
 184         /*
 185          * Let's wake up the inbound CPU now in case it requires some delay
 186          * to come online, but leave it gated in our entry vector code.
 187          */
 188         ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
 189         if (ret) {
 190                 pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
 191                 return ret;
 192         }
 193 
 194         /*
 195          * Raise a SGI on the inbound CPU to make sure it doesn't stall
 196          * in a possible WFI, such as in bL_power_down().
 197          */
 198         gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
 199 
 200         /*
 201          * Wait for the inbound to come up.  This allows for other
 202          * tasks to be scheduled in the mean time.
 203          */
 204         wait_for_completion(&inbound_alive);
 205         mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
 206 
 207         /*
 208          * From this point we are entering the switch critical zone
 209          * and can't take any interrupts anymore.
 210          */
 211         local_irq_disable();
 212         local_fiq_disable();
 213         trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
 214 
 215         /* redirect GIC's SGIs to our counterpart */
 216         gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
 217 
 218         tick_suspend_local();
 219 
 220         ret = cpu_pm_enter();
 221 
 222         /* we can not tolerate errors at this point */
 223         if (ret)
 224                 panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
 225 
 226         /* Swap the physical CPUs in the logical map for this logical CPU. */
 227         cpu_logical_map(this_cpu) = ib_mpidr;
 228         cpu_logical_map(that_cpu) = ob_mpidr;
 229 
 230         /* Let's do the actual CPU switch. */
 231         ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
 232         if (ret > 0)
 233                 panic("%s: cpu_suspend() returned %d\n", __func__, ret);
 234 
 235         /* We are executing on the inbound CPU at this point */
 236         mpidr = read_mpidr();
 237         pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
 238         BUG_ON(mpidr != ib_mpidr);
 239 
 240         mcpm_cpu_powered_up();
 241 
 242         ret = cpu_pm_exit();
 243 
 244         tick_resume_local();
 245 
 246         trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
 247         local_fiq_enable();
 248         local_irq_enable();
 249 
 250         *handshake_ptr = 1;
 251         dsb_sev();
 252 
 253         if (ret)
 254                 pr_err("%s exiting with error %d\n", __func__, ret);
 255         return ret;
 256 }
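
      /*
       * Switch requests are not executed in the caller's context.  Each
       * paired logical CPU gets a dedicated "kswitcher_<cpu>" kthread, bound
       * to that CPU and running at SCHED_FIFO priority 1, which sleeps on the
       * wait queue below until a request arrives and then performs
       * bL_switch_to() from the right CPU.
       */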
 257 
 258 struct bL_thread {
 259         spinlock_t lock;
 260         struct task_struct *task;
 261         wait_queue_head_t wq;
 262         int wanted_cluster;
 263         struct completion started;
 264         bL_switch_completion_handler completer;
 265         void *completer_cookie;
 266 };
 267 
 268 static struct bL_thread bL_threads[NR_CPUS];
 269 
 270 static int bL_switcher_thread(void *arg)
 271 {
 272         struct bL_thread *t = arg;
 273         struct sched_param param = { .sched_priority = 1 };
 274         int cluster;
 275         bL_switch_completion_handler completer;
 276         void *completer_cookie;
 277 
 278         sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
 279         complete(&t->started);
 280 
 281         do {
 282                 if (signal_pending(current))
 283                         flush_signals(current);
 284                 wait_event_interruptible(t->wq,
 285                                 t->wanted_cluster != -1 ||
 286                                 kthread_should_stop());
 287 
 288                 spin_lock(&t->lock);
 289                 cluster = t->wanted_cluster;
 290                 completer = t->completer;
 291                 completer_cookie = t->completer_cookie;
 292                 t->wanted_cluster = -1;
 293                 t->completer = NULL;
 294                 spin_unlock(&t->lock);
 295 
 296                 if (cluster != -1) {
 297                         bL_switch_to(cluster);
 298 
 299                         if (completer)
 300                                 completer(completer_cookie);
 301                 }
 302         } while (!kthread_should_stop());
 303 
 304         return 0;
 305 }
 306 
 307 static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
 308 {
 309         struct task_struct *task;
 310 
 311         task = kthread_create_on_node(bL_switcher_thread, arg,
 312                                       cpu_to_node(cpu), "kswitcher_%d", cpu);
 313         if (!IS_ERR(task)) {
 314                 kthread_bind(task, cpu);
 315                 wake_up_process(task);
 316         } else
 317                 pr_err("%s failed for CPU %d\n", __func__, cpu);
 318         return task;
 319 }
 320 
 321 /*
 322  * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 323  *      with completion notification via a callback
 324  *
 325  * @cpu: the CPU to switch
 326  * @new_cluster_id: the ID of the cluster to switch to.
 327  * @completer: switch completion callback.  if non-NULL,
 328  *      @completer(@completer_cookie) will be called on completion of
 329  *      the switch, in non-atomic context.
 330  * @completer_cookie: opaque context argument for @completer.
 331  *
 332  * This function causes a cluster switch on the given CPU by waking up
 333  * the appropriate switcher thread.  This function may or may not return
 334  * before the switch has occurred.
 335  *
 336  * If a @completer callback function is supplied, it will be called when
 337  * the switch is complete.  This can be used to determine asynchronously
 338  * when the switch is complete, regardless of when bL_switch_request()
 339  * returns.  When @completer is supplied, no new switch request is permitted
 340  * for the affected CPU until after the switch is complete, and @completer
 341  * has returned.
 342  */
 343 int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
 344                          bL_switch_completion_handler completer,
 345                          void *completer_cookie)
 346 {
 347         struct bL_thread *t;
 348 
 349         if (cpu >= ARRAY_SIZE(bL_threads)) {
 350                 pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
 351                 return -EINVAL;
 352         }
 353 
 354         t = &bL_threads[cpu];
 355 
 356         if (IS_ERR(t->task))
 357                 return PTR_ERR(t->task);
 358         if (!t->task)
 359                 return -ESRCH;
 360 
 361         spin_lock(&t->lock);
 362         if (t->completer) {
 363                 spin_unlock(&t->lock);
 364                 return -EBUSY;
 365         }
 366         t->completer = completer;
 367         t->completer_cookie = completer_cookie;
 368         t->wanted_cluster = new_cluster_id;
 369         spin_unlock(&t->lock);
 370         wake_up(&t->wq);
 371         return 0;
 372 }
 373 EXPORT_SYMBOL_GPL(bL_switch_request_cb);
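
      /*
       * Usage sketch (hypothetical caller, not part of this file): a client
       * needing a synchronous switch can pass a completion as the cookie and
       * wait for the completer to fire, e.g.:
       *
       *        static void switch_done(void *cookie)
       *        {
       *                complete(cookie);
       *        }
       *
       *        DECLARE_COMPLETION_ONSTACK(done);
       *        if (!bL_switch_request_cb(cpu, new_cluster_id, switch_done, &done))
       *                wait_for_completion(&done);
       *
       * The bL_switch_request() helper declared in asm/bL_switcher.h issues
       * the same request with no completer attached.
       */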
 374 
 375 /*
 376  * Activation and configuration code.
 377  */
 378 
 379 static DEFINE_MUTEX(bL_switcher_activation_lock);
 380 static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
 381 static unsigned int bL_switcher_active;
 382 static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
 383 static cpumask_t bL_switcher_removed_logical_cpus;
 384 
 385 int bL_switcher_register_notifier(struct notifier_block *nb)
 386 {
 387         return blocking_notifier_chain_register(&bL_activation_notifier, nb);
 388 }
 389 EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
 390 
 391 int bL_switcher_unregister_notifier(struct notifier_block *nb)
 392 {
 393         return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
 394 }
 395 EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
 396 
 397 static int bL_activation_notify(unsigned long val)
 398 {
 399         int ret;
 400 
 401         ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
 402         if (ret & NOTIFY_STOP_MASK)
 403                 pr_err("%s: notifier chain failed with status 0x%x\n",
 404                         __func__, ret);
 405         return notifier_to_errno(ret);
 406 }
 407 
 408 static void bL_switcher_restore_cpus(void)
 409 {
 410         int i;
 411 
 412         for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
 413                 struct device *cpu_dev = get_cpu_device(i);
 414                 int ret = device_online(cpu_dev);
 415                 if (ret)
 416                         dev_err(cpu_dev, "switcher: unable to restore CPU\n");
 417         }
 418 }
 419 
 420 static int bL_switcher_halve_cpus(void)
 421 {
 422         int i, j, cluster_0, gic_id, ret;
 423         unsigned int cpu, cluster, mask;
 424         cpumask_t available_cpus;
 425 
 426         /* First pass to validate what we have */
 427         mask = 0;
 428         for_each_online_cpu(i) {
 429                 cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
 430                 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
 431                 if (cluster >= 2) {
 432                         pr_err("%s: only dual cluster systems are supported\n", __func__);
 433                         return -EINVAL;
 434                 }
 435                 if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
 436                         return -EINVAL;
 437                 mask |= (1 << cluster);
 438         }
 439         if (mask != 3) {
 440                 pr_err("%s: no CPU pairing possible\n", __func__);
 441                 return -EINVAL;
 442         }
 443 
 444         /*
 445          * Now let's do the pairing.  We match each CPU with another CPU
 446          * from a different cluster.  To get a uniform scheduling behavior
 447          * without fiddling with CPU topology and compute capacity data,
 448          * we'll use logical CPUs initially belonging to the same cluster.
 449          */
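              /*
               * As an example of what the loop below produces: on a
               * hypothetical 2+3 topology where logical CPUs 0-1 sit in
               * cluster 0 and CPUs 2-4 in cluster 1, CPU0 ends up paired with
               * CPU4 and CPU1 with CPU3 (the last candidate wins), leaving
               * CPU2 unpaired.  CPUs 2-4 are then taken offline further down,
               * so only CPU0 and CPU1 remain visible to the scheduler, each
               * shadowed by its counterpart.
               */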
 450         memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
 451         cpumask_copy(&available_cpus, cpu_online_mask);
 452         cluster_0 = -1;
 453         for_each_cpu(i, &available_cpus) {
 454                 int match = -1;
 455                 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
 456                 if (cluster_0 == -1)
 457                         cluster_0 = cluster;
 458                 if (cluster != cluster_0)
 459                         continue;
 460                 cpumask_clear_cpu(i, &available_cpus);
 461                 for_each_cpu(j, &available_cpus) {
 462                         cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
 463                         /*
 464                          * Let's remember the last match to create "odd"
 465                          * pairings on purpose in order for other code not
 466                          * to assume any relation between physical and
 467                          * logical CPU numbers.
 468                          */
 469                         if (cluster != cluster_0)
 470                                 match = j;
 471                 }
 472                 if (match != -1) {
 473                         bL_switcher_cpu_pairing[i] = match;
 474                         cpumask_clear_cpu(match, &available_cpus);
 475                         pr_info("CPU%d paired with CPU%d\n", i, match);
 476                 }
 477         }
 478 
 479         /*
 480          * Now we disable the unwanted CPUs i.e. everything that has no
 481          * pairing information (that includes the pairing counterparts).
 482          */
 483         cpumask_clear(&bL_switcher_removed_logical_cpus);
 484         for_each_online_cpu(i) {
 485                 cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
 486                 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
 487 
 488                 /* Let's take note of the GIC ID for this CPU */
 489                 gic_id = gic_get_cpu_id(i);
 490                 if (gic_id < 0) {
 491                         pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
 492                         bL_switcher_restore_cpus();
 493                         return -EINVAL;
 494                 }
 495                 bL_gic_id[cpu][cluster] = gic_id;
 496                 pr_info("GIC ID for CPU %u cluster %u is %u\n",
 497                         cpu, cluster, gic_id);
 498 
 499                 if (bL_switcher_cpu_pairing[i] != -1) {
 500                         bL_switcher_cpu_original_cluster[i] = cluster;
 501                         continue;
 502                 }
 503 
 504                 ret = device_offline(get_cpu_device(i));
 505                 if (ret) {
 506                         bL_switcher_restore_cpus();
 507                         return ret;
 508                 }
 509                 cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
 510         }
 511 
 512         return 0;
 513 }
 514 
 515 /* Determine the logical CPU a given physical CPU is grouped on. */
 516 int bL_switcher_get_logical_index(u32 mpidr)
 517 {
 518         int cpu;
 519 
 520         if (!bL_switcher_active)
 521                 return -EUNATCH;
 522 
 523         mpidr &= MPIDR_HWID_BITMASK;
 524         for_each_online_cpu(cpu) {
 525                 int pairing = bL_switcher_cpu_pairing[cpu];
 526                 if (pairing == -1)
 527                         continue;
 528                 if ((mpidr == cpu_logical_map(cpu)) ||
 529                     (mpidr == cpu_logical_map(pairing)))
 530                         return cpu;
 531         }
 532         return -EINVAL;
 533 }
 534 
 535 static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 536 {
 537         trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
 538 }
 539 
 540 int bL_switcher_trace_trigger(void)
 541 {
 542         preempt_disable();
 543 
 544         bL_switcher_trace_trigger_cpu(NULL);
 545         smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
 546 
 547         preempt_enable();
 548 
 549         return 0;
 550 }
 551 EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
 552 
 553 static int bL_switcher_enable(void)
 554 {
 555         int cpu, ret;
 556 
 557         mutex_lock(&bL_switcher_activation_lock);
 558         lock_device_hotplug();
 559         if (bL_switcher_active) {
 560                 unlock_device_hotplug();
 561                 mutex_unlock(&bL_switcher_activation_lock);
 562                 return 0;
 563         }
 564 
 565         pr_info("big.LITTLE switcher initializing\n");
 566 
 567         ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
 568         if (ret)
 569                 goto error;
 570 
 571         ret = bL_switcher_halve_cpus();
 572         if (ret)
 573                 goto error;
 574 
 575         bL_switcher_trace_trigger();
 576 
 577         for_each_online_cpu(cpu) {
 578                 struct bL_thread *t = &bL_threads[cpu];
 579                 spin_lock_init(&t->lock);
 580                 init_waitqueue_head(&t->wq);
 581                 init_completion(&t->started);
 582                 t->wanted_cluster = -1;
 583                 t->task = bL_switcher_thread_create(cpu, t);
 584         }
 585 
 586         bL_switcher_active = 1;
 587         bL_activation_notify(BL_NOTIFY_POST_ENABLE);
 588         pr_info("big.LITTLE switcher initialized\n");
 589         goto out;
 590 
 591 error:
 592         pr_warn("big.LITTLE switcher initialization failed\n");
 593         bL_activation_notify(BL_NOTIFY_POST_DISABLE);
 594 
 595 out:
 596         unlock_device_hotplug();
 597         mutex_unlock(&bL_switcher_activation_lock);
 598         return ret;
 599 }
 600 
 601 #ifdef CONFIG_SYSFS
 602 
 603 static void bL_switcher_disable(void)
 604 {
 605         unsigned int cpu, cluster;
 606         struct bL_thread *t;
 607         struct task_struct *task;
 608 
 609         mutex_lock(&bL_switcher_activation_lock);
 610         lock_device_hotplug();
 611 
 612         if (!bL_switcher_active)
 613                 goto out;
 614 
 615         if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
 616                 bL_activation_notify(BL_NOTIFY_POST_ENABLE);
 617                 goto out;
 618         }
 619 
 620         bL_switcher_active = 0;
 621 
 622         /*
 623          * To deactivate the switcher, we must shut down the switcher
 624          * threads to prevent any other requests from being accepted.
  625          * Then, if the final cluster for a given logical CPU is not the
 626          * same as the original one, we'll recreate a switcher thread
 627          * just for the purpose of switching the CPU back without any
 628          * possibility for interference from external requests.
 629          */
 630         for_each_online_cpu(cpu) {
 631                 t = &bL_threads[cpu];
 632                 task = t->task;
 633                 t->task = NULL;
 634                 if (!task || IS_ERR(task))
 635                         continue;
 636                 kthread_stop(task);
 637                 /* no more switch may happen on this CPU at this point */
 638                 cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
 639                 if (cluster == bL_switcher_cpu_original_cluster[cpu])
 640                         continue;
 641                 init_completion(&t->started);
 642                 t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
 643                 task = bL_switcher_thread_create(cpu, t);
 644                 if (!IS_ERR(task)) {
 645                         wait_for_completion(&t->started);
 646                         kthread_stop(task);
 647                         cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
 648                         if (cluster == bL_switcher_cpu_original_cluster[cpu])
 649                                 continue;
 650                 }
 651                 /* If execution gets here, we're in trouble. */
 652                 pr_crit("%s: unable to restore original cluster for CPU %d\n",
 653                         __func__, cpu);
 654                 pr_crit("%s: CPU %d can't be restored\n",
 655                         __func__, bL_switcher_cpu_pairing[cpu]);
 656                 cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
 657                                   &bL_switcher_removed_logical_cpus);
 658         }
 659 
 660         bL_switcher_restore_cpus();
 661         bL_switcher_trace_trigger();
 662 
 663         bL_activation_notify(BL_NOTIFY_POST_DISABLE);
 664 
 665 out:
 666         unlock_device_hotplug();
 667         mutex_unlock(&bL_switcher_activation_lock);
 668 }
 669 
 670 static ssize_t bL_switcher_active_show(struct kobject *kobj,
 671                 struct kobj_attribute *attr, char *buf)
 672 {
 673         return sprintf(buf, "%u\n", bL_switcher_active);
 674 }
 675 
 676 static ssize_t bL_switcher_active_store(struct kobject *kobj,
 677                 struct kobj_attribute *attr, const char *buf, size_t count)
 678 {
 679         int ret;
 680 
 681         switch (buf[0]) {
 682         case '0':
 683                 bL_switcher_disable();
 684                 ret = 0;
 685                 break;
 686         case '1':
 687                 ret = bL_switcher_enable();
 688                 break;
 689         default:
 690                 ret = -EINVAL;
 691         }
 692 
 693         return (ret >= 0) ? count : ret;
 694 }
 695 
 696 static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
 697                 struct kobj_attribute *attr, const char *buf, size_t count)
 698 {
 699         int ret = bL_switcher_trace_trigger();
 700 
 701         return ret ? ret : count;
 702 }
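
      /*
       * These attributes appear as /sys/kernel/bL_switcher/active (read/write;
       * writing 1 enables the switcher, 0 disables it) and
       * /sys/kernel/bL_switcher/trace_trigger (write-only; any write re-emits
       * the current CPU/MPIDR mapping as trace events).
       */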
 703 
 704 static struct kobj_attribute bL_switcher_active_attr =
 705         __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
 706 
 707 static struct kobj_attribute bL_switcher_trace_trigger_attr =
 708         __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
 709 
 710 static struct attribute *bL_switcher_attrs[] = {
 711         &bL_switcher_active_attr.attr,
 712         &bL_switcher_trace_trigger_attr.attr,
 713         NULL,
 714 };
 715 
 716 static struct attribute_group bL_switcher_attr_group = {
 717         .attrs = bL_switcher_attrs,
 718 };
 719 
 720 static struct kobject *bL_switcher_kobj;
 721 
 722 static int __init bL_switcher_sysfs_init(void)
 723 {
 724         int ret;
 725 
 726         bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
 727         if (!bL_switcher_kobj)
 728                 return -ENOMEM;
 729         ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
 730         if (ret)
 731                 kobject_put(bL_switcher_kobj);
 732         return ret;
 733 }
 734 
 735 #endif  /* CONFIG_SYSFS */
 736 
 737 bool bL_switcher_get_enabled(void)
 738 {
 739         mutex_lock(&bL_switcher_activation_lock);
 740 
 741         return bL_switcher_active;
 742 }
 743 EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
 744 
 745 void bL_switcher_put_enabled(void)
 746 {
 747         mutex_unlock(&bL_switcher_activation_lock);
 748 }
 749 EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
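
      /*
       * Usage sketch (hypothetical caller, not part of this file): code that
       * must not race with switcher activation or removal typically brackets
       * its setup like this:
       *
       *        if (bL_switcher_get_enabled()) {
       *                ... configure for the paired, half-populated CPU map ...
       *        }
       *        bL_switcher_put_enabled();
       *
       * Note that bL_switcher_put_enabled() must be called in both cases,
       * since bL_switcher_get_enabled() returns with the activation lock held.
       */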
 750 
 751 /*
 752  * Veto any CPU hotplug operation on those CPUs we've removed
 753  * while the switcher is active.
 754  * We're just not ready to deal with that given the trickery involved.
 755  */
 756 static int bL_switcher_cpu_pre(unsigned int cpu)
 757 {
 758         int pairing;
 759 
 760         if (!bL_switcher_active)
 761                 return 0;
 762 
 763         pairing = bL_switcher_cpu_pairing[cpu];
 764 
 765         if (pairing == -1)
 766                 return -EINVAL;
 767         return 0;
 768 }
 769 
 770 static bool no_bL_switcher;
 771 core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
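
      /*
       * Booting with "no_bL_switcher" on the kernel command line skips the
       * automatic bL_switcher_enable() call in bL_switcher_init() below; the
       * switcher can still be turned on later by writing 1 to the sysfs
       * "active" file.
       */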
 772 
 773 static int __init bL_switcher_init(void)
 774 {
 775         int ret;
 776 
 777         if (!mcpm_is_available())
 778                 return -ENODEV;
 779 
 780         cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
 781                                   bL_switcher_cpu_pre, NULL);
 782         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
 783                                         NULL, bL_switcher_cpu_pre);
 784         if (ret < 0) {
 785                 cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
 786                 pr_err("bL_switcher: Failed to allocate a hotplug state\n");
 787                 return ret;
 788         }
 789         if (!no_bL_switcher) {
 790                 ret = bL_switcher_enable();
 791                 if (ret)
 792                         return ret;
 793         }
 794 
 795 #ifdef CONFIG_SYSFS
 796         ret = bL_switcher_sysfs_init();
 797         if (ret)
 798                 pr_err("%s: unable to create sysfs entry\n", __func__);
 799 #endif
 800 
 801         return 0;
 802 }
 803 
 804 late_initcall(bL_switcher_init);
