arch/arm/mach-omap2/omap-mpuss-lowpower.c


DEFINITIONS

This source file includes the following definitions:
  1. default_finish_suspend
  2. dummy_cpu_resume
  3. dummy_scu_prepare
  4. set_cpu_wakeup_addr
  5. scu_pwrst_prepare
  6. mpuss_clear_prev_logic_pwrst
  7. cpu_clear_prev_logic_pwrst
  8. l2x0_pwrst_prepare
  9. save_l2x0_context
  10. save_l2x0_context
  11. omap4_enter_lowpower
  12. omap4_hotplug_cpu
  13. enable_mercury_retention_mode
  14. omap4_mpuss_init
  15. omap4_get_cpu1_ns_pa_addr
  16. omap4_mpuss_early_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * OMAP MPUSS low power code
   4  *
   5  * Copyright (C) 2011 Texas Instruments, Inc.
   6  *      Santosh Shilimkar <santosh.shilimkar@ti.com>
   7  *
   8  * OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
   9  * Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
  10  * CPU0 and CPU1 LPRM modules.
  11  * CPU0, CPU1 and MPUSS each have their own power domain and
  12  * hence multiple low power combinations of MPUSS are possible.
  13  *
  14  * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
  15  * because of the hardware constraints of dormant mode: while
  16  * waking up from dormant mode, a reset signal to the Cortex-A9
  17  * processor must be asserted by the external power
  18  * controller.
  19  *
  20  * Based on architectural inputs and hardware recommendations, only
  21  * the modes below are supported from a power gain vs latency point of view.
  22  *
  23  *      CPU0            CPU1            MPUSS
  24  *      ----------------------------------------------
  25  *      ON              ON              ON
  26  *      ON(Inactive)    OFF             ON(Inactive)
  27  *      OFF             OFF             CSWR
  28  *      OFF             OFF             OSWR
  29  *      OFF             OFF             OFF(Device OFF *TBD)
  30  *      ----------------------------------------------
  31  *
  32  * Note: CPU0 is the master core. It is the last CPU to go down
  33  * and the first to wake up when MPUSS low power states are exercised.
  34  */
  35 
  36 #include <linux/kernel.h>
  37 #include <linux/io.h>
  38 #include <linux/errno.h>
  39 #include <linux/linkage.h>
  40 #include <linux/smp.h>
  41 
  42 #include <asm/cacheflush.h>
  43 #include <asm/tlbflush.h>
  44 #include <asm/smp_scu.h>
  45 #include <asm/pgalloc.h>
  46 #include <asm/suspend.h>
  47 #include <asm/virt.h>
  48 #include <asm/hardware/cache-l2x0.h>
  49 
  50 #include "soc.h"
  51 #include "common.h"
  52 #include "omap44xx.h"
  53 #include "omap4-sar-layout.h"
  54 #include "pm.h"
  55 #include "prcm_mpu44xx.h"
  56 #include "prcm_mpu54xx.h"
  57 #include "prminst44xx.h"
  58 #include "prcm44xx.h"
  59 #include "prm44xx.h"
  60 #include "prm-regbits-44xx.h"
  61 
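     /*
      * SAR RAM base address and the previously programmed CPU1 wakeup
      * address, both recorded in omap4_mpuss_early_init().
      */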
  62 static void __iomem *sar_base;
  63 static u32 old_cpu1_ns_pa_addr;
  64 
  65 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
  66 
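     /**
      * struct omap4_cpu_pm_info - per-CPU low power bookkeeping
      * @pwrdm:          CPUx power domain
      * @scu_sar_addr:   SAR location for the saved SCU power status
      * @wkup_sar_addr:  SAR location for the CPU wakeup routine address
      * @l2x0_sar_addr:  SAR location for the L2X0 save state
      */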
  67 struct omap4_cpu_pm_info {
  68         struct powerdomain *pwrdm;
  69         void __iomem *scu_sar_addr;
  70         void __iomem *wkup_sar_addr;
  71         void __iomem *l2x0_sar_addr;
  72 };
  73 
  74 /**
  75  * struct cpu_pm_ops - CPU pm operations
  76  * @finish_suspend:     CPU suspend finisher function pointer
  77  * @resume:             CPU resume function pointer
  78  * @scu_prepare:        CPU Snoop Control Unit (SCU) programming function pointer
  79  * @hotplug_restart:    CPU restart function pointer
  80  *
  81  * Structure holds function pointers for CPU low power operations like
  82  * suspend, resume and SCU programming.
  83  */
  84 struct cpu_pm_ops {
  85         int (*finish_suspend)(unsigned long cpu_state);
  86         void (*resume)(void);
  87         void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
  88         void (*hotplug_restart)(void);
  89 };
  90 
  91 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
  92 static struct powerdomain *mpuss_pd;
  93 static u32 cpu_context_offset;
  94 
  95 static int default_finish_suspend(unsigned long cpu_state)
  96 {
  97         omap_do_wfi();
  98         return 0;
  99 }
 100 
 101 static void dummy_cpu_resume(void)
 102 {}
 103 
 104 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
 105 {}
 106 
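     /*
      * Default to safe no-op handlers; the SoC-specific handlers are
      * installed by omap4_mpuss_init() where available.
      */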
 107 static struct cpu_pm_ops omap_pm_ops = {
 108         .finish_suspend         = default_finish_suspend,
 109         .resume                 = dummy_cpu_resume,
 110         .scu_prepare            = dummy_scu_prepare,
 111         .hotplug_restart        = dummy_cpu_resume,
 112 };
 113 
 114 /*
 115  * Program the wakeup routine address for CPU0 and CPU1,
 116  * used for OFF or DORMANT wakeup.
 117  */
 118 static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
 119 {
 120         struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
 121 
 122         if (pm_info->wkup_sar_addr)
 123                 writel_relaxed(addr, pm_info->wkup_sar_addr);
 124 }
 125 
 126 /*
 127  * Store the SCU power status value to scratchpad memory
 128  */
 129 static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
 130 {
 131         struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
 132         u32 scu_pwr_st;
 133 
 134         switch (cpu_state) {
 135         case PWRDM_POWER_RET:
 136                 scu_pwr_st = SCU_PM_DORMANT;
 137                 break;
 138         case PWRDM_POWER_OFF:
 139                 scu_pwr_st = SCU_PM_POWEROFF;
 140                 break;
 141         case PWRDM_POWER_ON:
 142         case PWRDM_POWER_INACTIVE:
 143         default:
 144                 scu_pwr_st = SCU_PM_NORMAL;
 145                 break;
 146         }
 147 
 148         if (pm_info->scu_sar_addr)
 149                 writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
 150 }
 151 
 152 /* Helper functions for MPUSS OSWR */
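     /*
      * The previous-context status bits in the PRM/PRCM_MPU context
      * registers are write-1-to-clear, so reading a register and writing
      * the value back clears the recorded "context lost" status.
      */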
 153 static inline void mpuss_clear_prev_logic_pwrst(void)
 154 {
 155         u32 reg;
 156 
 157         reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
 158                 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
 159         omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
 160                 OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
 161 }
 162 
 163 static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
 164 {
 165         u32 reg;
 166 
 167         if (cpu_id) {
 168                 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
 169                                         cpu_context_offset);
 170                 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
 171                                         cpu_context_offset);
 172         } else {
 173                 reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
 174                                         cpu_context_offset);
 175                 omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
 176                                         cpu_context_offset);
 177         }
 178 }
 179 
 180 /*
 181  * Store the CPU cluster state for L2X0 low power operations.
 182  */
 183 static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
 184 {
 185         struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
 186 
 187         if (pm_info->l2x0_sar_addr)
 188                 writel_relaxed(save_state, pm_info->l2x0_sar_addr);
 189 }
 190 
 191 /*
 192  * Save the L2X0 AUXCTRL and POR values to SAR memory. They are
 193  * used in every MPUSS OFF restore path.
 194  */
 195 #ifdef CONFIG_CACHE_L2X0
 196 static void __init save_l2x0_context(void)
 197 {
 198         void __iomem *l2x0_base = omap4_get_l2cache_base();
 199 
 200         if (l2x0_base && sar_base) {
 201                 writel_relaxed(l2x0_saved_regs.aux_ctrl,
 202                                sar_base + L2X0_AUXCTRL_OFFSET);
 203                 writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
 204                                sar_base + L2X0_PREFETCH_CTRL_OFFSET);
 205         }
 206 }
 207 #else
 208 static void __init save_l2x0_context(void)
 209 {}
 210 #endif
 211 
 212 /**
 213  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
 214  * @cpu : CPU ID
 215  * @power_state: Low power state.
 216  *
 217  * This function manages the low power programming of the OMAP4
 218  * MPUSS subsystem.
 219  *
 220  * MPUSS states for the context save, save_state =
 221  *      0 - Nothing lost and no need to save: MPUSS INACTIVE
 222  *      1 - CPUx L1 and logic lost: MPUSS CSWR
 223  *      2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 224  *      3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 225  */
 226 int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 227 {
 228         struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
 229         unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;
 230         unsigned int wakeup_cpu;
 231 
 232         if (omap_rev() == OMAP4430_REV_ES1_0)
 233                 return -ENXIO;
 234 
 235         switch (power_state) {
 236         case PWRDM_POWER_ON:
 237         case PWRDM_POWER_INACTIVE:
 238                 save_state = 0;
 239                 break;
 240         case PWRDM_POWER_OFF:
 241                 cpu_logic_state = PWRDM_POWER_OFF;
 242                 save_state = 1;
 243                 break;
 244         case PWRDM_POWER_RET:
 245                 if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
 246                         save_state = 0;
 247                 break;
 248         default:
 249                 /*
 250                  * CPUx CSWR is an invalid hardware state. Also, CPUx OSWR
 251                  * doesn't make much sense, since logic is lost and the L1
 252                  * cache needs to be cleaned because of coherency. This makes
 253                  * CPUx OSWR equivalent to CPUx OFF and hence not supported.
 254                  */
 255                 WARN_ON(1);
 256                 return -ENXIO;
 257         }
 258 
 259         pwrdm_pre_transition(NULL);
 260 
 261         /*
 262          * Check MPUSS next state and save interrupt controller if needed.
 263          * In MPUSS OSWR or device OFF, the interrupt controller context is lost.
 264          */
 265         mpuss_clear_prev_logic_pwrst();
 266         if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
 267                 (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
 268                 save_state = 2;
 269 
 270         cpu_clear_prev_logic_pwrst(cpu);
 271         pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
 272         pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
 273         set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
 274         omap_pm_ops.scu_prepare(cpu, power_state);
 275         l2x0_pwrst_prepare(cpu, save_state);
 276 
 277         /*
 278          * Call the low level function with the targeted low power state.
 279          */
 280         if (save_state)
 281                 cpu_suspend(save_state, omap_pm_ops.finish_suspend);
 282         else
 283                 omap_pm_ops.finish_suspend(save_state);
 284 
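             /*
              * With the ROM SMP boot GICD erratum workaround, the GIC
              * distributor may be left disabled while CPU1 is down, so
              * re-enable it once CPU1 is back.
              */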
 285         if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
 286                 gic_dist_enable();
 287 
 288         /*
 289          * Restore the CPUx power state to ON, otherwise the CPUx
 290          * power domain can transition to the programmed low power
 291          * state while doing WFI outside the low power code. On
 292          * secure devices, CPUx does WFI which can result in a
 293          * domain transition.
 294          */
 295         wakeup_cpu = smp_processor_id();
 296         pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
 297 
 298         pwrdm_post_transition(NULL);
 299 
 300         return 0;
 301 }
 302 
 303 /**
 304  * omap4_hotplug_cpu: OMAP4 CPU hotplug entry
 305  * @cpu : CPU ID
 306  * @power_state: CPU low power state.
 307  */
 308 int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
 309 {
 310         struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
 311         unsigned int cpu_state = 0;
 312 
 313         if (omap_rev() == OMAP4430_REV_ES1_0)
 314                 return -ENXIO;
 315 
 316         /* Use the achievable power state for the domain */
 317         power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
 318                                                false, power_state);
 319 
 320         if (power_state == PWRDM_POWER_OFF)
 321                 cpu_state = 1;
 322 
 323         pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
 324         pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
 325         set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
 326         omap_pm_ops.scu_prepare(cpu, power_state);
 327 
 328         /*
 329          * The CPU never returns if the targeted power state is OFF mode.
 330          * CPU ONLINE follows the normal CPU ONLINE path via
 331          * omap4_secondary_startup().
 332          */
 333         omap_pm_ops.finish_suspend(cpu_state);
 334 
 335         pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
 336         return 0;
 337 }
 338 
 339 
 340 /*
 341  * Enable Mercury Fast HG retention mode by default.
 342  */
 343 static void enable_mercury_retention_mode(void)
 344 {
 345         u32 reg;
 346 
 347         reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
 348                                   OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
 349         /* Enable HG_EN, HG_RAMPUP = fast mode */
 350         reg |= BIT(24) | BIT(25);
 351         omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
 352                                       OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
 353 }
 354 
 355 /*
 356  * Initialise OMAP4 MPUSS
 357  */
 358 int __init omap4_mpuss_init(void)
 359 {
 360         struct omap4_cpu_pm_info *pm_info;
 361 
 362         if (omap_rev() == OMAP4430_REV_ES1_0) {
 363                 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
 364                 return -ENODEV;
 365         }
 366 
 367         /* Initialise per-CPU PM information */
 368         pm_info = &per_cpu(omap4_pm_info, 0x0);
 369         if (sar_base) {
 370                 pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
 371                 if (cpu_is_omap44xx())
 372                         pm_info->wkup_sar_addr = sar_base +
 373                                 CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
 374                 else
 375                         pm_info->wkup_sar_addr = sar_base +
 376                                 OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
 377                 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
 378         }
 379         pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
 380         if (!pm_info->pwrdm) {
 381                 pr_err("Lookup failed for CPU0 pwrdm\n");
 382                 return -ENODEV;
 383         }
 384 
 385         /* Clear CPU previous power domain state */
 386         pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
 387         cpu_clear_prev_logic_pwrst(0);
 388 
 389         /* Initialise CPU0 power domain state to ON */
 390         pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
 391 
 392         pm_info = &per_cpu(omap4_pm_info, 0x1);
 393         if (sar_base) {
 394                 pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
 395                 if (cpu_is_omap44xx())
 396                         pm_info->wkup_sar_addr = sar_base +
 397                                 CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
 398                 else
 399                         pm_info->wkup_sar_addr = sar_base +
 400                                 OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
 401                 pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
 402         }
 403 
 404         pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
 405         if (!pm_info->pwrdm) {
 406                 pr_err("Lookup failed for CPU1 pwrdm\n");
 407                 return -ENODEV;
 408         }
 409 
 410         /* Clear CPU previous power domain state */
 411         pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
 412         cpu_clear_prev_logic_pwrst(1);
 413 
 414         /* Initialise CPU1 power domain state to ON */
 415         pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
 416 
 417         mpuss_pd = pwrdm_lookup("mpu_pwrdm");
 418         if (!mpuss_pd) {
 419                 pr_err("Failed to lookup MPUSS power domain\n");
 420                 return -ENODEV;
 421         }
 422         pwrdm_clear_all_prev_pwrst(mpuss_pd);
 423         mpuss_clear_prev_logic_pwrst();
 424 
 425         if (sar_base) {
 426                 /* Save device type on scratchpad for low level code to use */
 427                 writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
 428                                sar_base + OMAP_TYPE_OFFSET);
 429                 save_l2x0_context();
 430         }
 431 
 432         if (cpu_is_omap44xx()) {
 433                 omap_pm_ops.finish_suspend = omap4_finish_suspend;
 434                 omap_pm_ops.resume = omap4_cpu_resume;
 435                 omap_pm_ops.scu_prepare = scu_pwrst_prepare;
 436                 omap_pm_ops.hotplug_restart = omap4_secondary_startup;
 437                 cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
 438         } else if (soc_is_omap54xx() || soc_is_dra7xx()) {
 439                 cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
 440                 enable_mercury_retention_mode();
 441         }
 442 
 443         if (cpu_is_omap446x())
 444                 omap_pm_ops.hotplug_restart = omap4460_secondary_startup;
 445 
 446         return 0;
 447 }
 448 
 449 #endif /* CONFIG_PM && CONFIG_SMP */
 450 
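     /*
      * Return the CPU1 wakeup (NS_PA_ADDR) value that was in place before
      * omap4_mpuss_early_init() overwrote it; it is saved for validity
      * checks later on.
      */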
 451 u32 omap4_get_cpu1_ns_pa_addr(void)
 452 {
 453         return old_cpu1_ns_pa_addr;
 454 }
 455 
 456 /*
 457  * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
 458  * the current kernel's secondary_startup() early, before
 459  * clockdomains_init(). Otherwise clockdomains_init() can
 460  * wake CPU1 and cause a hang.
 461  */
 462 void __init omap4_mpuss_early_init(void)
 463 {
 464         unsigned long startup_pa;
 465         void __iomem *ns_pa_addr;
 466 
 467         if (!(soc_is_omap44xx() || soc_is_omap54xx()))
 468                 return;
 469 
 470         sar_base = omap4_get_sar_ram_base();
 471 
 472         /* Save old NS_PA_ADDR for validity checks later on */
 473         if (soc_is_omap44xx())
 474                 ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
 475         else
 476                 ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
 477         old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
 478 
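             /* Pick the secondary startup entry point for this SoC and boot mode */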
 479         if (soc_is_omap443x())
 480                 startup_pa = __pa_symbol(omap4_secondary_startup);
 481         else if (soc_is_omap446x())
 482                 startup_pa = __pa_symbol(omap4460_secondary_startup);
 483         else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
 484                 startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
 485         else
 486                 startup_pa = __pa_symbol(omap5_secondary_startup);
 487 
 488         if (soc_is_omap44xx())
 489                 writel_relaxed(startup_pa, sar_base +
 490                                CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
 491         else
 492                 writel_relaxed(startup_pa, sar_base +
 493                                OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
 494 }
