This source file includes the following definitions:
- walk_stackframe (CONFIG_FRAME_POINTER variant)
- walk_stackframe (fallback variant without frame pointers)
- print_trace_address
- show_stack
- save_wchan
- get_wchan
- __save_trace
- save_trace
- save_stack_trace_tsk
- save_stack_trace

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FRAME_POINTER

struct stackframe {
        unsigned long fp;
        unsigned long ra;
};

void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                             bool (*fn)(unsigned long, void *), void *arg)
{
        unsigned long fp, sp, pc;

        if (regs) {
                fp = frame_pointer(regs);
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                const register unsigned long current_sp __asm__ ("sp");
                fp = (unsigned long)__builtin_frame_address(0);
                sp = current_sp;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
                fp = task->thread.s[0];
                sp = task->thread.sp;
                pc = task->thread.ra;
        }

        for (;;) {
                unsigned long low, high;
                struct stackframe *frame;

                if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
                        break;

                /* Validate frame pointer */
                low = sp + sizeof(struct stackframe);
                high = ALIGN(sp, THREAD_SIZE);
                if (unlikely(fp < low || fp > high || fp & 0x7))
                        break;
                /* Unwind stack frame */
                frame = (struct stackframe *)fp - 1;
                sp = fp;
                fp = frame->fp;
                pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
                                           (unsigned long *)(fp - 8));
        }
}
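
/*
 * Illustrative note (not part of the original file): under the RISC-V
 * frame-pointer convention assumed here, s0/fp points just past the
 * two-word record saved in the callee's prologue, so the record sits
 * immediately below it:
 *
 *      fp - 16: saved caller fp  -> struct stackframe.fp
 *      fp -  8: saved ra         -> struct stackframe.ra
 *
 * which is why the walker above reads "(struct stackframe *)fp - 1".
 */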

#else /* !CONFIG_FRAME_POINTER */

void notrace walk_stackframe(struct task_struct *task,
        struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
        unsigned long sp, pc;
        unsigned long *ksp;

        if (regs) {
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
                const register unsigned long current_sp __asm__ ("sp");
                sp = current_sp;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
                sp = task->thread.sp;
                pc = task->thread.ra;
        }

        if (unlikely(sp & 0x7))
                return;

        /*
         * Without frame pointers, scan the raw stack: treat each word as a
         * candidate return address and step the value back by 4 bytes so
         * the reported pc points into the presumed call site.
         */
        ksp = (unsigned long *)sp;
        while (!kstack_end(ksp)) {
                if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
                        break;
                pc = (*ksp++) - 0x4;
        }
}

#endif /* CONFIG_FRAME_POINTER */
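
/*
 * Illustrative sketch (not part of the original file): a minimal callback
 * obeying the walk_stackframe() contract above. Returning false continues
 * the walk; returning true stops it early. The count_frames() helper name
 * is an assumption for the example only.
 */
static bool __maybe_unused count_frames(unsigned long pc, void *arg)
{
        unsigned int *count = arg;

        (*count)++;             /* one more kernel text address seen */
        return false;           /* keep walking to the bottom of the stack */
}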


static bool print_trace_address(unsigned long pc, void *arg)
{
        print_ip_sym(pc);
        return false;
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        pr_cont("Call Trace:\n");
        walk_stackframe(task, NULL, print_trace_address, NULL);
}


static bool save_wchan(unsigned long pc, void *arg)
{
        if (!in_sched_functions(pc)) {
                unsigned long *p = arg;
                *p = pc;
                return true;
        }
        return false;
}

unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;

        if (likely(task && task != current && task->state != TASK_RUNNING))
                walk_stackframe(task, NULL, save_wchan, &pc);
        return pc;
}
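
/*
 * Illustrative note (not part of the original file): get_wchan() resolves
 * the "wait channel" of a sleeping task -- the first kernel text address on
 * its stack that is not inside a scheduler function -- and returns 0 for
 * the current task or a runnable one.
 */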


#ifdef CONFIG_STACKTRACE

static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
        struct stack_trace *trace = arg;

        if (unlikely(nosched && in_sched_functions(pc)))
                return false;
        if (unlikely(trace->skip > 0)) {
                trace->skip--;
                return false;
        }

        trace->entries[trace->nr_entries++] = pc;
        return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(unsigned long pc, void *arg)
{
        return __save_trace(pc, arg, false);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */
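
/*
 * Illustrative sketch (not part of the original file): a minimal caller of
 * the CONFIG_STACKTRACE interface defined above. The fixed-size buffer and
 * the dump_current_stack() helper name are assumptions for the example only.
 */
#ifdef CONFIG_STACKTRACE
static void __maybe_unused dump_current_stack(void)
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = ARRAY_SIZE(entries),
                .nr_entries     = 0,
                .skip           = 0,
        };
        unsigned int i;

        /* Record up to 16 return addresses from the current task's stack. */
        save_stack_trace(&trace);

        for (i = 0; i < trace.nr_entries; i++)
                print_ip_sym(trace.entries[i]);
}
#endif /* CONFIG_STACKTRACE */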