/*
 * arch/arm/mach-vexpress/tc2_pm.c - TC2 power management support
 *
 * Created by:  Nicolas Pitre, October 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:   (C) 2012  ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL			0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF			0x400
#define A7_CONF				0x500
#define SYS_INFO			0x700
#define SPC_BASE			0xb00

static void __iomem *scc;

#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

/*
 * Prepare a CPU for power-up: program its resume address and enable its
 * wakeup IRQ at the SPC power controller.
 */
static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;
	ve_spc_set_resume_addr(cluster, cpu,
			       virt_to_phys(mcpm_entry_point));
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	return 0;
}

static int tc2_pm_cluster_powerup(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= TC2_CLUSTERS)
		return -EINVAL;
	ve_spc_powerdown(cluster, false);
	return 0;
}

static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	/*
	 * If the CPU is committed to power down, make sure
	 * the power controller will be in charge of waking it
	 * up upon IRQ, i.e. IRQ lines are cut from the GIC CPU
	 * interface to the CPU by disabling the GIC CPU interface,
	 * to prevent wfi from completing execution behind the
	 * power controller's back.
	 */
	gic_cpu_if_down();
}

static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, true);
	ve_spc_global_wakeup_irq(true);
}

/*
 * Flush and disable this CPU's caches to the Level of Unification
 * Inner Shareable and take the CPU out of coherency.
 */
static void tc2_pm_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}

static void tc2_pm_cluster_cache_disable(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
	}

	v7_exit_coherency_flush(all);
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

/* Returns true if the SCC reports the given core as held in reset. */
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		  RESET_A7_NCORERESET(cpu)
		: RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
			 __func__, cpu, cluster,
			 readl_relaxed(scc + RESET_CTRL));

		/*
		 * We need the CPU to reach WFI, but the power
		 * controller may put the cluster in reset and
		 * power it off as soon as that happens, before
		 * we have a chance to see STANDBYWFI.
		 *
		 * So we need to check for both conditions:
		 */
		if (tc2_core_in_reset(cpu, cluster) ||
		    ve_spc_cpu_in_wfi(cpu, cluster))
			return 0; /* success: the CPU is halted */

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
	ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
}

static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);
}

static void tc2_pm_cluster_is_up(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, false);
	ve_spc_global_wakeup_irq(false);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.cpu_powerup		= tc2_pm_cpu_powerup,
	.cluster_powerup	= tc2_pm_cluster_powerup,
	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.cpu_is_up		= tc2_pm_cpu_is_up,
	.cluster_is_up		= tc2_pm_cluster_is_up,
};

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	unsigned int mpidr, cpu, cluster;
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * the cluster IDs and the number of CPUs actually available
	 * in each cluster.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return -EINVAL;
	}

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		/* test if we can (re)enable the CCI on our own */
		BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);