/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
};

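/*
 * One entry per C-state declared in omap4_idle_driver below (C1..C3);
 * the enter functions index this table with the cpuidle state index.
 */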
static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

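/*
 * Coordination state for the coupled C-states: cpu_done[] marks a CPU that
 * has finished its low power attempt, and state_ptr points at the
 * omap4_idle_data table indexed by the cpuidle state index.
 */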
static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];

/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;

	/*
	 * CPU0 has to stay ON and wait until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes only once
	 * CPU1 is out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}

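	/*
	 * Only MPU OSWR (memory in RET, logic OFF), i.e. the deepest C-state,
	 * loses MPUSS context such as the GIC and wakeupgen registers.
	 */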
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	tick_broadcast_enter();

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context)
			cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wake up CPU1 only if it is not offline */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
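		/*
		 * ROM SMP boot GICD erratum workaround: keep the GIC
		 * distributor disabled across CPU1's wakeup, then wait
		 * below until it has been re-enabled before retriggering
		 * the local timer.
		 */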
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		clkdm_wakeup(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	tick_broadcast_exit();

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

/*
 * For each cpu, set up the broadcast timer because the local timer
 * stops in states deeper than C1.
 */
static void omap_setup_broadcast_timer(void *arg)
{
	tick_broadcast_enable();
}

static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
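		/* exit_latency and target_residency are in microseconds */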
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON",
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
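	/* C1 (WFI) needs no cross-CPU coordination, so it is the safe fallback */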
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver with the cpuidle
 * framework using the valid set of states.
 */
int __init omap4_idle_init(void)
{
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if (!mpu_pd || !cpu_pd[0] || !cpu_pd[1])
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	/* Configure the broadcast timer on each cpu */
	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);

	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
}