root/arch/arm/mach-vexpress/tc2_pm.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. tc2_pm_cpu_powerup
  2. tc2_pm_cluster_powerup
  3. tc2_pm_cpu_powerdown_prepare
  4. tc2_pm_cluster_powerdown_prepare
  5. tc2_pm_cpu_cache_disable
  6. tc2_pm_cluster_cache_disable
  7. tc2_core_in_reset
  8. tc2_pm_wait_for_powerdown
  9. tc2_pm_cpu_suspend_prepare
  10. tc2_pm_cpu_is_up
  11. tc2_pm_cluster_is_up
  12. tc2_pm_power_up_setup
  13. tc2_pm_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
   4  *
   5  * Created by:  Nicolas Pitre, October 2012
   6  * Copyright:   (C) 2012-2013  Linaro Limited
   7  *
   8  * Some portions of this file were originally written by Achin Gupta
   9  * Copyright:   (C) 2012  ARM Limited
  10  */
  11 
  12 #include <linux/delay.h>
  13 #include <linux/init.h>
  14 #include <linux/io.h>
  15 #include <linux/kernel.h>
  16 #include <linux/of_address.h>
  17 #include <linux/of_irq.h>
  18 #include <linux/errno.h>
  19 #include <linux/irqchip/arm-gic.h>
  20 
  21 #include <asm/mcpm.h>
  22 #include <asm/proc-fns.h>
  23 #include <asm/cacheflush.h>
  24 #include <asm/cputype.h>
  25 #include <asm/cp15.h>
  26 
  27 #include <linux/arm-cci.h>
  28 
  29 #include "spc.h"
  30 
  31 /* SCC conf registers */
  32 #define RESET_CTRL              0x018
  33 #define RESET_A15_NCORERESET(cpu)       (1 << (2 + (cpu)))
  34 #define RESET_A7_NCORERESET(cpu)        (1 << (16 + (cpu)))
  35 
  36 #define A15_CONF                0x400
  37 #define A7_CONF                 0x500
  38 #define SYS_INFO                0x700
  39 #define SPC_BASE                0xb00
  40 
  41 static void __iomem *scc;
  42 
  43 #define TC2_CLUSTERS                    2
  44 #define TC2_MAX_CPUS_PER_CLUSTER        3
  45 
  46 static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
  47 
  48 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
  49 {
  50         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
  51         if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
  52                 return -EINVAL;
  53         ve_spc_set_resume_addr(cluster, cpu,
  54                                __pa_symbol(mcpm_entry_point));
  55         ve_spc_cpu_wakeup_irq(cluster, cpu, true);
  56         return 0;
  57 }
  58 
  59 static int tc2_pm_cluster_powerup(unsigned int cluster)
  60 {
  61         pr_debug("%s: cluster %u\n", __func__, cluster);
  62         if (cluster >= TC2_CLUSTERS)
  63                 return -EINVAL;
  64         ve_spc_powerdown(cluster, false);
  65         return 0;
  66 }
  67 
/*
 * Prepare the calling CPU for powerdown: arm its SPC wake-up IRQ and
 * then disconnect it from the GIC CPU interface. The ordering matters:
 * the SPC must own the wake-up path before the GIC CPU IF is cut.
 */
static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
	 * to the CPU by disabling the GIC CPU IF to prevent wfi
	 * from completing execution behind power controller back
	 */
	gic_cpu_if_down(0);
}
  82 
/*
 * Prepare a whole cluster for powerdown: tell the SPC the cluster may
 * be shut off, and enable the global wake-up IRQ so an interrupt can
 * still bring the cluster back.
 */
static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, true);
	ve_spc_global_wakeup_irq(true);
}
  90 
/*
 * Take the calling CPU out of coherency: flush and disable its caches
 * up to the Level of Unification Inner Shareable ("louis"), as required
 * before the individual CPU is powered off.
 */
static void tc2_pm_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}
  95 
/*
 * Take the last CPU of a dying cluster out of coherency: flush all
 * cache levels, then detach the cluster's slave port from the CCI
 * interconnect. Statement order here is load-bearing; do not reorder.
 */
static void tc2_pm_cluster_cache_disable(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 * (Write to the L2 prefetch control register,
		 * CP15 c15/c0/3, then barrier.)
		 */
		asm volatile(
		"mcr    p15, 1, %0, c15, c0, 3 \n\t"
		"isb    \n\t"
		"dsb    "
		: : "r" (0x400) );
	}

	/* Flush every cache level and exit SMP coherency. */
	v7_exit_coherency_flush(all);
	/* Disable this cluster's CCI port so it can be powered off. */
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}
 113 
 114 static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
 115 {
 116         u32 mask = cluster ?
 117                   RESET_A7_NCORERESET(cpu)
 118                 : RESET_A15_NCORERESET(cpu);
 119 
 120         return !(readl_relaxed(scc + RESET_CTRL) & mask);
 121 }
 122 
 123 #define POLL_MSEC 10
 124 #define TIMEOUT_MSEC 1000
 125 
 126 static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
 127 {
 128         unsigned tries;
 129 
 130         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 131         BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
 132 
 133         for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
 134                 pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
 135                          __func__, cpu, cluster,
 136                          readl_relaxed(scc + RESET_CTRL));
 137 
 138                 /*
 139                  * We need the CPU to reach WFI, but the power
 140                  * controller may put the cluster in reset and
 141                  * power it off as soon as that happens, before
 142                  * we have a chance to see STANDBYWFI.
 143                  *
 144                  * So we need to check for both conditions:
 145                  */
 146                 if (tc2_core_in_reset(cpu, cluster) ||
 147                     ve_spc_cpu_in_wfi(cpu, cluster))
 148                         return 0; /* success: the CPU is halted */
 149 
 150                 /* Otherwise, wait and retry: */
 151                 msleep(POLL_MSEC);
 152         }
 153 
 154         return -ETIMEDOUT; /* timeout */
 155 }
 156 
/*
 * Before a CPU suspends, record where it should resume: the MCPM entry
 * point, programmed into its SPC resume address register.
 */
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
	ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}
 161 
/*
 * A CPU has finished coming up: disarm its SPC wake-up IRQ and clear
 * its resume address, undoing the powerdown/suspend preparation.
 */
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);
}
 169 
/*
 * A cluster has finished coming up: cancel any pending SPC powerdown
 * request and disable the global wake-up IRQ again.
 */
static void tc2_pm_cluster_is_up(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, false);
	ve_spc_global_wakeup_irq(false);
}
 177 
/* MCPM backend callbacks implementing TC2 CPU/cluster power management. */
static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.cpu_powerup		= tc2_pm_cpu_powerup,
	.cluster_powerup	= tc2_pm_cluster_powerup,
	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.cpu_is_up		= tc2_pm_cpu_is_up,
	.cluster_is_up		= tc2_pm_cluster_is_up,
};
 190 
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * __naked: pure inline assembly with no compiler-generated prologue or
 * epilogue. r0 carries the affinity level; anything other than level 1
 * (cluster) returns immediately, level 1 tail-calls into the CCI driver
 * to re-enable this cluster's interconnect port.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}
 201 
 202 static int __init tc2_pm_init(void)
 203 {
 204         unsigned int mpidr, cpu, cluster;
 205         int ret, irq;
 206         u32 a15_cluster_id, a7_cluster_id, sys_info;
 207         struct device_node *np;
 208 
 209         /*
 210          * The power management-related features are hidden behind
 211          * SCC registers. We need to extract runtime information like
 212          * cluster ids and number of CPUs really available in clusters.
 213          */
 214         np = of_find_compatible_node(NULL, NULL,
 215                         "arm,vexpress-scc,v2p-ca15_a7");
 216         scc = of_iomap(np, 0);
 217         if (!scc)
 218                 return -ENODEV;
 219 
 220         a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
 221         a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
 222         if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
 223                 return -EINVAL;
 224 
 225         sys_info = readl_relaxed(scc + SYS_INFO);
 226         tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
 227         tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
 228 
 229         irq = irq_of_parse_and_map(np, 0);
 230 
 231         /*
 232          * A subset of the SCC registers is also used to communicate
 233          * with the SPC (power controller). We need to be able to
 234          * drive it very early in the boot process to power up
 235          * processors, so we initialize the SPC driver here.
 236          */
 237         ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
 238         if (ret)
 239                 return ret;
 240 
 241         if (!cci_probed())
 242                 return -ENODEV;
 243 
 244         mpidr = read_cpuid_mpidr();
 245         cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 246         cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 247         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 248         if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
 249                 pr_err("%s: boot CPU is out of bound!\n", __func__);
 250                 return -EINVAL;
 251         }
 252 
 253         ret = mcpm_platform_register(&tc2_pm_power_ops);
 254         if (!ret) {
 255                 mcpm_sync_init(tc2_pm_power_up_setup);
 256                 /* test if we can (re)enable the CCI on our own */
 257                 BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
 258                 pr_info("TC2 power management initialized\n");
 259         }
 260         return ret;
 261 }
 262 
 263 early_initcall(tc2_pm_init);

/* [<][>][^][v][top][bottom][index][help] */