/*
 * Low-level Power Management code.
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <mach/pm.h>

#include "pm.h"
#include "sdramc.h"

/* Same as 0xfff00000 but fits in a 21-bit signed immediate */
#define PM_BASE	-0x100000
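/*
 * (-0x100000 in 32-bit two's complement is 0x100000000 - 0x100000 =
 * 0xfff00000, and -0x100000 is the most negative value a 21-bit signed
 * field can hold, so the constant encodes directly in instructions that
 * take such an immediate.)
 */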

	/* Keep this close to the irq handlers */
	.section .irq.text, "ax", @progbits

	/*
	 * void cpu_enter_idle(void)
	 *
	 * Put the CPU into "idle" mode, in which it will consume
	 * significantly less power.
	 *
	 * If an interrupt comes along in the window between
	 * unmask_interrupts and the sleep instruction below, the
	 * interrupt code will adjust the return address so that we
	 * never execute the sleep instruction. This is required
	 * because the AP7000 doesn't unmask interrupts when entering
	 * sleep modes; later CPUs may not need this workaround.
	 */
	.global	cpu_enter_idle
	.type	cpu_enter_idle, @function
cpu_enter_idle:
	mask_interrupts
	get_thread_info r8
	ld.w	r9, r8[TI_flags]
	bld	r9, TIF_NEED_RESCHED		/* reschedule pending? */
	brcs	.Lret_from_sleep		/* then don't go to sleep */
	sbr	r9, TIF_CPU_GOING_TO_SLEEP	/* tell the irq code we're sleeping */
	st.w	r8[TI_flags], r9
	unmask_interrupts
	sleep	CPU_SLEEP_IDLE
	.size	cpu_enter_idle, . - cpu_enter_idle
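
	/*
	 * When the CPU wakes up from idle mode, the interrupt handler
	 * returns to the instruction following the sleep above, so we
	 * fall straight through into cpu_idle_skip_sleep below. If the
	 * interrupt instead hit the window described above, the
	 * interrupt code sends us to the same place explicitly. Either
	 * way, TIF_CPU_GOING_TO_SLEEP gets cleared again before we
	 * return to the caller.
	 */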

	/*
	 * Common return path for PM functions that don't run from
	 * SRAM.
	 */
	.global cpu_idle_skip_sleep
	.type	cpu_idle_skip_sleep, @function
cpu_idle_skip_sleep:
	mask_interrupts
	ld.w	r9, r8[TI_flags]		/* r8 still points to thread_info */
	cbr	r9, TIF_CPU_GOING_TO_SLEEP	/* we're awake again */
	st.w	r8[TI_flags], r9
.Lret_from_sleep:
	unmask_interrupts
	retal	r12
	.size	cpu_idle_skip_sleep, . - cpu_idle_skip_sleep
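
	/*
	 * For reference, the "adjust the return address" step mentioned
	 * above amounts to the interrupt entry code doing roughly the
	 * following before returning to kernel mode (a sketch, not the
	 * actual entry code):
	 *
	 *	if (ti->flags & (1 << TIF_CPU_GOING_TO_SLEEP))
	 *		return_pc = (unsigned long)cpu_idle_skip_sleep;
	 *
	 * so a wakeup that races with cpu_enter_idle still lands here,
	 * with the registers (including r8) it had when the interrupt
	 * hit.
	 */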

#ifdef CONFIG_PM
	.section .init.text, "ax", @progbits

	.global	pm_exception
	.type	pm_exception, @function
pm_exception:
	/*
	 * Exceptions are masked when we switch to this handler, so
	 * we'll only get "unrecoverable" exceptions (offset 0).
	 */
	sub	r12, pc, . - .Lpanic_msg	/* r12 = address of the message */
	lddpc	pc, .Lpanic_addr		/* tail-call panic() */

	.align	2
.Lpanic_addr:
	.long	panic
.Lpanic_msg:
	.asciz	"Unrecoverable exception during suspend\n"
	.size	pm_exception, . - pm_exception
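
	/*
	 * In C terms the handler above boils down to
	 *
	 *	panic("Unrecoverable exception during suspend\n");
	 *
	 * with the message address passed in r12, the first argument
	 * register in the AVR32 calling convention. panic() never
	 * returns, so no return address is set up.
	 */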

	.global	pm_irq0
	.type	pm_irq0, @function
pm_irq0:
	/* Disable interrupts and return after the sleep instruction */
	mfsr	r9, SYSREG_RSR_INT0
	mtsr	SYSREG_RAR_INT0, r8
	sbr	r9, SYSREG_GM_OFFSET
	mtsr	SYSREG_RSR_INT0, r9
	rete
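
	/*
	 * pm_standby and pm_suspend_to_ram below load r8 with the
	 * address of the label following their sleep instruction before
	 * unmasking interrupts. Writing r8 to RAR_INT0 makes rete
	 * resume there, and setting the GM bit in RSR_INT0 ensures the
	 * post-sleep code runs with interrupts masked again.
	 */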

	/*
	 * void pm_standby(unsigned long sdramc_base)
	 *
	 * Enter PM_SUSPEND_STANDBY mode. At this point, all drivers
	 * are suspended and interrupts are disabled. Interrupts
	 * marked as 'wakeup' event sources may still come along and
	 * get us out of here.
	 *
	 * The SDRAM will be put into self-refresh mode (which does
	 * not require a clock from the CPU), and the CPU will be put
	 * into "frozen" mode (HSB bus stopped). The SDRAM controller
	 * will automatically bring the SDRAM into normal mode on the
	 * first access, and the power manager will automatically
	 * start the HSB and CPU clocks upon a wakeup event.
	 *
	 * This code uses the same "skip sleep" technique as above.
	 * It is very important that we continue at the "1:" label
	 * directly after the sleep instruction, since that's where
	 * pm_irq0 will send us if it decides that we need to skip
	 * the sleep instruction.
	 */
	.global	pm_standby
	.type	pm_standby, @function
pm_standby:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self-refresh */
	sync	0			/* flush write buffer */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back to make sure it got there */
	unmask_interrupts
	sleep	CPU_SLEEP_FROZEN
1:	mask_interrupts
	retal	r12
	.size	pm_standby, . - pm_standby
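
	/*
	 * The LPR update above corresponds roughly to this C sketch
	 * (sdramc_base is the value passed in r12; the LPCB field sits
	 * in bits [1:0], per the bfins offset/width used here):
	 *
	 *	unsigned long lpr = *(volatile u32 *)(sdramc_base + SDRAMC_LPR);
	 *
	 *	lpr = (lpr & ~3UL) | SDRAMC_LPR_LPCB_SELF_RFR;
	 *	*(volatile u32 *)(sdramc_base + SDRAMC_LPR) = lpr;
	 *	lpr = *(volatile u32 *)(sdramc_base + SDRAMC_LPR);
	 *
	 * with the final read making sure the store has actually
	 * reached the controller before we go to sleep.
	 */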

	.global	pm_suspend_to_ram
	.type	pm_suspend_to_ram, @function
pm_suspend_to_ram:
	/*
	 * Interrupts are already masked at this point, and EVBA
	 * points to pm_exception above.
	 */
	mov	r11, 0
	cache	r11[2], 8		/* clean all dcache lines */
	sync	0			/* flush write buffer */
	ld.w	r10, r12[SDRAMC_LPR]
	sub	r8, pc, . - 1f		/* return address for irq handler */
	mov	r11, SDRAMC_LPR_LPCB_SELF_RFR
	bfins	r10, r11, 0, 2		/* LPCB <- self-refresh */
	st.w	r12[SDRAMC_LPR], r10	/* put SDRAM in self-refresh mode */
	ld.w	r11, r12[SDRAMC_LPR]	/* read back to make sure it got there */

	unmask_interrupts
	sleep	CPU_SLEEP_STOP
1:	mask_interrupts

	retal	r12
	.size	pm_suspend_to_ram, . - pm_suspend_to_ram

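	/*
	 * pm_sram_end marks the end of the code that needs to run while
	 * the SDRAM is in self-refresh mode. The PM initialization code
	 * presumably relocates the routines above into internal SRAM and
	 * invokes the SRAM copies when suspending; that is also why
	 * cpu_idle_skip_sleep is described as the common return path for
	 * PM functions that don't run from SRAM.
	 */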
	.global	pm_sram_end
	.type	pm_sram_end, @function
pm_sram_end:
	.size	pm_sram_end, 0

#endif /* CONFIG_PM */