/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;

	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for the backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return false;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return false;
	} else if (!kbt->is_current) {
		return false;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return retval == 0;
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE - 1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose)
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
		return NULL;
	return p;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt,
				      struct rt_sigframe *kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;

		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

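/* Is the iterator's current pc a sigreturn trampoline? */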
static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

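/*
 * If the current frame is a fault-handler or signal-handler frame,
 * re-seed the backtracer from the registers saved in that frame so
 * we can trace into the interrupted context.  Returns zero if the
 * frame is neither.
 */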
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a different page than the top-of-kernel-stack
 * we recorded at the last context switch, we have probably blown the
 * stack, and nothing is going to work out well.  If we can at least get
 * out a warning, that may help with debugging, though we probably won't
 * be able to backtrace into the code that actually did the recursive
 * damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

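/*
 * Set up the iterator to walk the given task's stack.  With a NULL
 * task (or the current task) we trace "current"; with NULL regs and
 * a sleeping task we start from the context saved at switch_to().
 *
 * Typical use, following the pattern of save_stack_trace_tsk() below
 * (handle_frame() here just stands in for per-frame processing):
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, NULL);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		handle_frame(kbt.it.pc, kbt.it.sp);
 */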
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

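/* Has the backtrace come to an end (cleanly or otherwise)? */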
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

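/* Advance the iterator one frame, detecting loops and new contexts. */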
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;

	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

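/*
 * Describe "address" symbolically into "buf": kernel addresses get a
 * kallsyms lookup; user addresses get the name and extent of the
 * enclosing vma, if we managed to take the mmap_sem.
 */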
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize - 1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = d_path(&vma->vm_file->f_path, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens,
 * it is easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current->thread.in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current->thread.in_backtrace = true;
	return true;
}

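/* Release the recursion guard taken by start_backtrace(). */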
static void end_backtrace(void)
{
	current->thread.in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() will have spit out a space in front
		 * of us, and it will mess up our KERN_ERR.
		 */
		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       raw_smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN + 100];
		unsigned long address = kbt->it.pc;

		/* Try to acquire the mmap_sem as we pass into userspace. */
		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;

	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

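/* Build a minimal pt_regs holding the four values the backtracer needs. */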
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;

	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched/core.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

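/* Capture up to trace->max_entries kernel PCs from the given task's stack. */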
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();
done:
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

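/* Capture a stack trace for the current task. */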
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);
516