/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>

#include <asm/sysreg.h>
#include <asm/ocd.h>
#include <asm/syscalls.h>

#include <mach/pm.h>

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/*
 * This file handles the architecture-dependent parts of process handling.
 */

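/*
 * Idle-loop hook: cpu_enter_idle() puts the core into its low-power
 * sleep state until the next interrupt arrives.
 */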
void arch_cpu_idle(void)
{
        cpu_enter_idle();
}

void machine_halt(void)
{
        /*
         * Enter Stop mode. The 32 kHz oscillator will keep running so
         * the RTC will keep the time properly and the system will
         * boot quickly.
         */
        asm volatile("sleep 3\n\t"
                     "sub pc, -2");
}

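/* Power off through the board-specific hook, if one is registered. */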
void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
}

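/* Trigger a CPU reset through the OCD Development Control register. */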
void machine_restart(char *cmd)
{
        ocd_write(DC, (1 << OCD_DC_DBE_BIT));
        ocd_write(DC, (1 << OCD_DC_RES_BIT));
        while (1) ;
}

/*
 * Free the current thread's data structures, etc.
 */
void exit_thread(void)
{
        ocd_disable(current);
}

void flush_thread(void)
{
        /* nothing to do */
}

void release_thread(struct task_struct *dead_task)
{
        /* do nothing */
}

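/*
 * Dump the memory between bottom and top to the console, eight words
 * per line, stopping early if a word cannot be read.
 */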
static void dump_mem(const char *str, const char *log_lvl,
                     unsigned long bottom, unsigned long top)
{
        unsigned long p;
        int i;

        printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);

        for (p = bottom & ~31; p < top; ) {
                printk("%s%04lx: ", log_lvl, p & 0xffff);

                for (i = 0; i < 8; i++, p += 4) {
                        unsigned int val;

                        if (p < bottom || p >= top)
                                printk("         ");
                        else {
                                if (__get_user(val, (unsigned int __user *)p)) {
                                        printk("\n");
                                        goto out;
                                }
                                printk("%08x ", val);
                        }
                }
                printk("\n");
        }

out:
        return;
}

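/*
 * A stack pointer is valid if it points into the task's kernel stack
 * and a whole word can still be read at that address.
 */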
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
        return (p > (unsigned long)tinfo)
                && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
}

#ifdef CONFIG_FRAME_POINTER
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
                               struct pt_regs *regs, const char *log_lvl)
{
        unsigned long lr, fp;
        struct thread_info *tinfo;

        if (regs)
                fp = regs->r7;
        else if (tsk == current)
                asm("mov %0, r7" : "=r"(fp));
        else
                fp = tsk->thread.cpu_context.r7;

        /*
         * Walk the stack as long as the frame pointer (a) is within
         * the kernel stack of the task, and (b) it doesn't move
         * downwards.
         */
        tinfo = task_thread_info(tsk);
        printk("%sCall trace:\n", log_lvl);
        while (valid_stack_ptr(tinfo, fp)) {
                unsigned long new_fp;

                lr = *(unsigned long *)fp;
#ifdef CONFIG_KALLSYMS
                printk("%s [<%08lx>] ", log_lvl, lr);
#else
                printk(" [<%08lx>] ", lr);
#endif
                print_symbol("%s\n", lr);

                new_fp = *(unsigned long *)(fp + 4);
                if (new_fp <= fp)
                        break;
                fp = new_fp;
        }
        printk("\n");
}
#else
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
                               struct pt_regs *regs, const char *log_lvl)
{
        unsigned long addr;

        printk("%sCall trace:\n", log_lvl);

        while (!kstack_end(sp)) {
                addr = *sp++;
                if (kernel_text_address(addr)) {
#ifdef CONFIG_KALLSYMS
                        printk("%s [<%08lx>] ", log_lvl, addr);
#else
                        printk(" [<%08lx>] ", addr);
#endif
                        print_symbol("%s\n", addr);
                }
        }
        printk("\n");
}
#endif

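/*
 * Dump the raw stack contents followed by a call trace, both prefixed
 * with the given log level.
 */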
void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
                        struct pt_regs *regs, const char *log_lvl)
{
        struct thread_info *tinfo;

        if (sp == 0) {
                if (tsk)
                        sp = tsk->thread.cpu_context.ksp;
                else
                        sp = (unsigned long)&tinfo;
        }
        if (!tsk)
                tsk = current;

        tinfo = task_thread_info(tsk);

        if (valid_stack_ptr(tinfo, sp)) {
                dump_mem("Stack: ", log_lvl, sp,
                         THREAD_SIZE + (unsigned long)tinfo);
                show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
        }
}

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
}

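/* Human-readable names for the MODE field of the status register. */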
static const char *cpu_modes[] = {
        "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
        "Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};

void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
{
        unsigned long sp = regs->sp;
        unsigned long lr = regs->lr;
        unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;

        show_regs_print_info(log_lvl);

        if (!user_mode(regs)) {
                sp = (unsigned long)regs + FRAME_SIZE_FULL;

                printk("%s", log_lvl);
                print_symbol("PC is at %s\n", instruction_pointer(regs));
                printk("%s", log_lvl);
                print_symbol("LR is at %s\n", lr);
        }

        printk("%spc : [<%08lx>]    lr : [<%08lx>]    %s\n"
               "%ssp : %08lx  r12: %08lx  r11: %08lx\n",
               log_lvl, instruction_pointer(regs), lr, print_tainted(),
               log_lvl, sp, regs->r12, regs->r11);
        printk("%sr10: %08lx  r9 : %08lx  r8 : %08lx\n",
               log_lvl, regs->r10, regs->r9, regs->r8);
        printk("%sr7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
               log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
        printk("%sr3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
               log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
        printk("%sFlags: %c%c%c%c%c\n", log_lvl,
               regs->sr & SR_Q ? 'Q' : 'q',
               regs->sr & SR_V ? 'V' : 'v',
               regs->sr & SR_N ? 'N' : 'n',
               regs->sr & SR_Z ? 'Z' : 'z',
               regs->sr & SR_C ? 'C' : 'c');
        printk("%sMode bits: %c%c%c%c%c%c%c%c%c%c\n", log_lvl,
               regs->sr & SR_H ? 'H' : 'h',
               regs->sr & SR_J ? 'J' : 'j',
               regs->sr & SR_DM ? 'M' : 'm',
               regs->sr & SR_D ? 'D' : 'd',
               regs->sr & SR_EM ? 'E' : 'e',
               regs->sr & SR_I3M ? '3' : '.',
               regs->sr & SR_I2M ? '2' : '.',
               regs->sr & SR_I1M ? '1' : '.',
               regs->sr & SR_I0M ? '0' : '.',
               regs->sr & SR_GM ? 'G' : 'g');
        printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
}

void show_regs(struct pt_regs *regs)
{
        unsigned long sp = regs->sp;

        if (!user_mode(regs))
                sp = (unsigned long)regs + FRAME_SIZE_FULL;

        show_regs_log_lvl(regs, "");
        show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
}
EXPORT_SYMBOL(show_regs);

/* Fill in the fpu structure for a core dump. This is easy -- we don't have any */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        /* Not valid */
        return 0;
}

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
asmlinkage void syscall_return(void);

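/*
 * Set up the kernel context of a new thread. Kernel threads get a
 * clean register frame with the thread function and its argument in
 * r1/r0; user children get a copy of the parent's registers with r12
 * (the return value) cleared.
 */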
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg,
                struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.cpu_context.r0 = arg;
                p->thread.cpu_context.r1 = usp; /* fn */
                p->thread.cpu_context.r2 = (unsigned long)syscall_return;
                p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
                childregs->sr = MODE_SUPERVISOR;
        } else {
                *childregs = *current_pt_regs();
                if (usp)
                        childregs->sp = usp;
                childregs->r12 = 0; /* Set return value for child */
                p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        }

        p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
        p->thread.cpu_context.ksp = (unsigned long)childregs;

        clear_tsk_thread_flag(p, TIF_DEBUG);
        if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
                ocd_enable(p);

        return 0;
}

/*
 * This function is supposed to answer the question "who called
 * schedule()?"
 */
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long pc;
        unsigned long stack_page;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)task_stack_page(p);
        BUG_ON(!stack_page);

        /*
         * The stored value of PC is either the address right after
         * the call to __switch_to() or ret_from_fork.
         */
        pc = thread_saved_pc(p);
        if (in_sched_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
                unsigned long fp = p->thread.cpu_context.r7;
                BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
                pc = *(unsigned long *)fp;
#else
                /*
                 * We depend on the frame size of schedule here, which
                 * is actually quite ugly. It might be possible to
                 * determine the frame size automatically at build
                 * time by doing this:
                 *   - compile sched/core.c
                 *   - disassemble the resulting sched.o
                 *   - look for 'sub sp,??' shortly after '<schedule>:'
                 */
                unsigned long sp = p->thread.cpu_context.ksp + 16;
                BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
                pc = *(unsigned long *)sp;
#endif
        }

        return pc;
}