/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
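
/*
 * Usage sketch: each option above can be toggled at run time through
 * tracefs (typically mounted at /sys/kernel/tracing), e.g.
 *
 *	echo funcgraph-proc > trace_options
 *	echo nofuncgraph-irqs > trace_options
 *
 * which lands in func_graph_set_flag() at the bottom of this file.
 */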

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
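
/*
 * Roughly: FLAGS_FILL_FULL blanks the whole DURATION column, while
 * FLAGS_FILL_START/FLAGS_FILL_END pad only the space before/after the
 * "==========>" irq markers; see the fill cases in print_graph_duration().
 */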

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack
	 * of the current task.  Its value should be in the range
	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
	 * used.  To support filtering out specific functions, it makes
	 * the index negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a negative
	 * index it will ignore the record.  The index gets recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, and then recording continues
	 * normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if the
	 * current function was filtered out via
	 * ftrace_graph_notrace_addr(), which the user can set from the
	 * set_graph_notrace file in tracefs.
	 */
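	/*
	 * Worked example (FTRACE_NOTRACE_DEPTH is a large constant,
	 * 65536 at the time of writing): entering a notrace'd function
	 * with curr_ret_stack == -1 pushes at index 0, then drops
	 * curr_ret_stack to 0 - 65536 = -65536.  Every nested entry now
	 * sees a value below -1 and bails out at the check below.  On
	 * return, ftrace_pop_return_trace() adds 65536 back to find
	 * slot 0, and ftrace_return_to_handler() restores
	 * curr_ret_stack to -1.
	 */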
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 compiled with -Os (optimize for size) makes
	 * the latest gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
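
/*
 * Note: ftrace_return_to_handler() is invoked from the arch return
 * trampoline (e.g. return_to_handler in the x86 entry code), which then
 * jumps to the address returned here to resume the original caller.
 */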

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it if it's nested in, or is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
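
/*
 * Usage sketch from user space (tracefs is usually mounted at
 * /sys/kernel/tracing):
 *
 *	echo function_graph > current_tracer
 *	echo 100 > tracing_thresh
 *
 * A non-zero tracing_thresh (set in microseconds) makes the init above
 * register the *_thresh callbacks, so only sufficiently slow functions
 * are reported.
 */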

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
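
/*
 * e.g. pid 1755 running "sshd" renders as "sshd-1755", centered in a
 * TRACE_GRAPH_PROCINFO_LENGTH (14) character field; the comm is
 * truncated to 7 characters above to keep the field compact.
 */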

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
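
/*
 * e.g. a timestamp of 1234567890123 ns prints as " 1234.567890 |  "
 * (seconds, then microseconds), matching the TIME header column.
 */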

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
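
/*
 * For example, a duration of 12345 ns comes out as "12.345 us" plus
 * padding: do_div() above splits it into 12 us and 345 ns.
 */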

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Flag an excessive execution time in the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
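
/*
 * For instance, a leaf (a call with no traced children) is printed on a
 * single line:
 *
 *	1)   0.123 us    |    do_fault();
 *
 * while a nested call opens a brace that print_graph_return() later
 * closes:
 *
 *	1)               |    handle_mm_fault() {
 */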

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (irqs are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
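
/*
 * Net effect of the two checks above with nofuncgraph-irqs: from the
 * first function inside the __irqentry_text section (e.g. do_IRQ) down
 * to its matching return, every event is swallowed, so interrupt noise
 * never shows up in the middle of the traced call graph.
 */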

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%u\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
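
/*
 * Usage sketch: limit tracing to the first two call levels with
 *
 *	echo 2 > /sys/kernel/tracing/max_graph_depth
 *
 * (or under /sys/kernel/debug/tracing), and restore unlimited depth by
 * writing 0; trace_graph_entry() only enforces a non-zero max_depth.
 */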

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);