This source file includes the following definitions:
- stack_trace_print
- stack_trace_snprint
- stack_trace_consume_entry
- stack_trace_consume_entry_nosched
- stack_trace_save
- stack_trace_save_tsk
- stack_trace_save_regs
- stack_trace_save_tsk_reliable
- stack_trace_save_user
- save_stack_trace_tsk
- save_stack_trace_regs
- stack_trace_save
- stack_trace_save_tsk
- stack_trace_save_regs
- stack_trace_save_tsk_reliable
- stack_trace_save_user

/*
 * Stack trace management functions
 */
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>

/**
 * stack_trace_print - Print the entries in the stack trace
 * @entries:    Pointer to storage array
 * @nr_entries: Number of entries in the storage array
 * @spaces:     Number of leading spaces to print
 */
void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,
                       int spaces)
{
        unsigned int i;

        if (WARN_ON(!entries))
                return;

        for (i = 0; i < nr_entries; i++)
                printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
EXPORT_SYMBOL_GPL(stack_trace_print);

/**
 * stack_trace_snprint - Print the entries in the stack trace into a buffer
 * @buf:        Pointer to the print buffer
 * @size:       Size of the print buffer
 * @entries:    Pointer to storage array
 * @nr_entries: Number of entries in the storage array
 * @spaces:     Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
                        unsigned int nr_entries, int spaces)
{
        unsigned int generated, i, total = 0;

        if (WARN_ON(!entries))
                return 0;

        for (i = 0; i < nr_entries && size; i++) {
                generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
                                     (void *)entries[i]);

                total += generated;
                if (generated >= size) {
                        buf += size;
                        size = 0;
                } else {
                        buf += generated;
                        size -= generated;
                }
        }

        return total;
}
EXPORT_SYMBOL_GPL(stack_trace_snprint);
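
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * debugfs/sysfs-style show handler might format a previously captured trace
 * into a text buffer. Buffer sizes and the skip count are arbitrary examples.
 *
 *      unsigned long entries[16];
 *      char buf[512];
 *      unsigned int nr;
 *
 *      nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_snprint(buf, sizeof(buf), entries, nr, 0);
 */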

#ifdef CONFIG_ARCH_STACKWALK

/*
 * State shared with the arch_stack_walk() consumer callbacks below: entries
 * are stored into @store until @size entries have been recorded, after
 * skipping the first @skip entries.
 */
struct stacktrace_cookie {
        unsigned long   *store;
        unsigned int    size;
        unsigned int    skip;
        unsigned int    len;
};

static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
                                      bool reliable)
{
        struct stacktrace_cookie *c = cookie;

        if (c->len >= c->size)
                return false;

        if (c->skip > 0) {
                c->skip--;
                return true;
        }
        c->store[c->len++] = addr;
        return c->len < c->size;
}

static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
                                              bool reliable)
{
        if (in_sched_functions(addr))
                return true;
        return stack_trace_consume_entry(cookie, addr, reliable);
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
                              unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
                .skip   = skipnr + 1,
        };

        arch_stack_walk(consume_entry, &c, current, NULL);
        return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save);
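
/*
 * Usage sketch (illustrative only): a typical debugging call site captures
 * the current backtrace into a local array and prints it. The array size
 * and skip count are arbitrary example values.
 *
 *      unsigned long entries[16];
 *      unsigned int nr;
 *
 *      nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr, 0);
 */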

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @tsk:        The task to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size, unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
                /* skip this function if the traced task is current */
                .skip   = skipnr + !!(current == tsk),
        };

        if (!try_get_task_stack(tsk))
                return 0;

        arch_stack_walk(consume_entry, &c, tsk, NULL);
        put_task_stack(tsk);
        return c.len;
}
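
/*
 * Usage sketch (illustrative only): saving the stack of another, typically
 * blocked, task - e.g. for a /proc style debug interface. Scheduler
 * internals are filtered out by the _nosched consumer above. 'victim' is a
 * hypothetical task_struct pointer held by the caller.
 *
 *      unsigned long entries[16];
 *      unsigned int nr;
 *
 *      nr = stack_trace_save_tsk(victim, entries, ARRAY_SIZE(entries), 0);
 *      stack_trace_print(entries, nr, 0);
 */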

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:       Pointer to pt_regs to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
                                   unsigned int size, unsigned int skipnr)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
                .skip   = skipnr,
        };

        arch_stack_walk(consume_entry, &c, current, regs);
        return c.len;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:        Pointer to the task to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 *
 * Return: An error if any unreliable feature of the stack is detected.
 * Otherwise, the number of trace entries stored, in which case the trace
 * is guaranteed to be reliable.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
        };
        int ret;

        /*
         * A task without a stack (e.g. an exited zombie) yields an empty
         * trace rather than an error.
         */
        if (!try_get_task_stack(tsk))
                return 0;

        ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
        put_task_stack(tsk);
        return ret ? ret : c.len;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
        stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
        struct stacktrace_cookie c = {
                .store  = store,
                .size   = size,
        };
        mm_segment_t fs;

        /* Trace the user stack only if this is not a kernel thread. */
        if (current->flags & PF_KTHREAD)
                return 0;

        fs = get_fs();
        set_fs(USER_DS);
        arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
        set_fs(fs);

        return c.len;
}
#endif /* CONFIG_USER_STACKTRACE_SUPPORT */

#else /* CONFIG_ARCH_STACKWALK */

/*
 * Architectures that do not implement save_stack_trace_*() get these weak
 * stubs, which warn once per boot whenever the facility is used.
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}

__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
                              unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
                .skip           = skipnr + 1,
        };

        save_stack_trace(&trace);
        return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save);

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @task:       The task to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *task,
                                  unsigned long *store, unsigned int size,
                                  unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
                /* skip this function if the traced task is current */
                .skip           = skipnr + !!(current == task),
        };

        save_stack_trace_tsk(task, &trace);
        return trace.nr_entries;
}

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:       Pointer to pt_regs to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 * @skipnr:     Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
                                   unsigned int size, unsigned int skipnr)
{
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
                .skip           = skipnr,
        };

        save_stack_trace_regs(regs, &trace);
        return trace.nr_entries;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:        Pointer to the task to examine
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 *
 * Return: An error if any unreliable feature of the stack is detected.
 * Otherwise, the number of trace entries stored, in which case the trace
 * is guaranteed to be reliable.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
                                  unsigned int size)
{
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
        };
        int ret = save_stack_trace_tsk_reliable(tsk, &trace);

        return ret ? ret : trace.nr_entries;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:      Pointer to storage array
 * @size:       Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
        struct stack_trace trace = {
                .entries        = store,
                .max_entries    = size,
        };

        save_stack_trace_user(&trace);
        return trace.nr_entries;
}
#endif /* CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* !CONFIG_ARCH_STACKWALK */