This source file includes the following definitions:
- print_max_stack
- check_stack
- stack_trace_call
- stack_max_size_read
- stack_max_size_write
- __next
- t_next
- t_start
- t_stop
- trace_lookup_stack
- print_disabled
- t_show
- stack_trace_open
- stack_trace_filter_open
- stack_trace_sysctl
- enable_stacktrace
- stack_trace_init
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg(" Depth Size Location (%d entries)\n"
		 " ----- ---- --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
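
/*
 * Overview of the algorithm below: the stack tracer hooks every traced
 * function entry via ftrace.  check_stack() measures the current stack
 * usage from the address of a local variable in stack_trace_call()
 * (its distance to the top of the THREAD_SIZE-aligned stack).  When a
 * new maximum is seen, it takes stack_trace_max_lock, saves a fresh
 * stack trace into stack_dump_trace[], and then walks the stack words
 * to find where each saved return address actually lives, recording the
 * remaining depth at that point in stack_trace_index[].  The overhead of
 * the tracer's own frame is measured once (tracer_frame) and subtracted
 * from later measurements.
 */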
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	/* Remove the overhead of the stack tracer itself */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set tracer_frame after we read it */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* A race may have already updated the max */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
						  ARRAY_SIZE(stack_dump_trace) - 1,
						  0);

	/* Skip over the entries that belong to the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/* Now find where on the stack these entries are stored. */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may for some reason be
	 * missing from the stack, so account for that. If all are there,
	 * this loop runs only once. This code only executes on a new max,
	 * so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds access.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the next search from here */
				start = p + 1;

				/*
				 * Measure the overhead of the stack tracer's
				 * own frame once, and subtract it from the
				 * recorded max so it is not reported.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Archs that define ARCH_FTRACE_SHIFT_STACK_TRACER store return
	 * addresses in a way that skews the size calculation by one entry;
	 * shift the index table down by one to compensate.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

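/*
 * stack_trace_call() is the ftrace callback registered through trace_ops.
 * It uses the per-CPU disable_stack_tracer counter as a recursion guard so
 * that functions called while measuring the stack do not re-enter the
 * check, and it bails out when RCU is not watching.  The ip is advanced
 * past the mcount/fentry call site (MCOUNT_INSN_SIZE) so that it matches
 * the traced function's entry in the saved trace, letting check_stack()
 * skip the tracer's own frames.  The address of the local variable 'stack'
 * serves as the probe for the current stack depth.
 */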
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* No atomics needed: this counter is only modified by this CPU */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If RCU is not watching, saving a stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);

	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * Disable the stack tracer on this CPU while stack_trace_max_lock
	 * is held; otherwise a traced function called here could re-enter
	 * check_stack() and deadlock on the same lock.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

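/*
 * t_start()/t_stop() bracket every read of the stack_trace file: they
 * disable interrupts, bump the per-CPU disable_stack_tracer counter so
 * the ftrace callback becomes a no-op on this CPU, and hold
 * stack_trace_max_lock for the whole traversal so stack_dump_trace[]
 * and stack_trace_index[] cannot change while they are being printed.
 */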
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, " Depth Size Location"
			   " (%d entries)\n"
			   " ----- ---- --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif

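/*
 * stack_trace_sysctl() is the handler for the kernel.stack_tracer_enabled
 * sysctl.  Under stack_sysctl_mutex it lets proc_dointvec() update
 * stack_tracer_enabled and, when the value actually changes on a write,
 * registers or unregisters the ftrace callback accordingly.
 */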
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

/* Handles the "stacktrace" and "stacktrace_filter=..." boot parameters */
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);