/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

#ifdef CONFIG_DYNAMIC_FTRACE

/* Simple stub so we can boot the kernel until runtime patching has
 * disabled all calls to this.  Then it'll be unused.
 */
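/* (With CONFIG_DYNAMIC_FTRACE, every mcount call site initially points
 * here; at boot the ftrace core rewrites each site through the arch's
 * ftrace_make_nop()/ftrace_make_call() hooks, so this stub only runs
 * very early.)
 */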
ENTRY(__mcount)
# if ANOMALY_05000371
	nop; nop; nop; nop;
# endif
	rts;
ENDPROC(__mcount)

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
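/* (For reference, a profiled function therefore starts out roughly like
 * this -- a sketch of the -pg instrumentation, not verbatim gcc output:
 *	[--sp] = rets;
 *	call __mcount;
 *	... normal prologue ...
 * which is why the parent's address sits one word above our saves.)
 */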
ENTRY(_ftrace_caller)
	/* save first/second/third function arg and the return register */
	[--sp] = r2;
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
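	/* the stack now looks like (offsets from the four pushes above):
	 *	[sp +  0] rets -- top of the profiled function
	 *	[sp +  4] r1
	 *	[sp +  8] r0
	 *	[sp + 12] r2
	 *	[sp + 16] previous RETS pushed by GCC -- the parent function
	 */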

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself needs adjusting for the mcount call: RETS points just
	 * past the call sequence, so back it up by MCOUNT_INSN_SIZE
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

.globl _ftrace_call
_ftrace_call:
	call _ftrace_stub
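	/* the call above is a live patch site: ftrace_update_ftrace_func()
	 * redirects it from _ftrace_stub to the active tracer at runtime
	 */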

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl _ftrace_graph_call
_ftrace_graph_call:
	nop;	/* jump _ftrace_graph_caller; */
# endif

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(_ftrace_caller)

#else

/* See documentation for _ftrace_caller */
ENTRY(__mcount)
	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;
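	/* in C, the check above is roughly (a sketch, not the actual code):
	 *	if (ftrace_trace_function != ftrace_stub)
	 *		goto do_trace;
	 */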

# ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* if the ftrace_graph_return function pointer is not set to
	 * the ftrace_stub entry, call prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* similarly, if the ftrace_graph_entry function pointer is not
	 * set to the ftrace_graph_entry_stub entry, call
	 * prepare_ftrace_return().
	 */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
# endif

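	/* no tracer and no graph hooks are active: restore r2 and return
	 * straight to the profiled function
	 */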
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;
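	/* the stack now matches the layout sketched in _ftrace_caller above */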

	/* setup the tracer function */
	p0 = r3;

	/* function_trace_call(unsigned long ip, unsigned long parent_ip):
	 *  ip: this point was called by ...
	 *  parent_ip: ... this function
	 * the ip itself will need adjusting for the mcount call
	 */
	r0 = rets;
	r1 = [sp + 16];	/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
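/* For reference, the C-side hook is roughly (a sketch of the prototype,
 * matching the argument comments below):
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr,
 *				   unsigned long frame_pointer);
 * it rewrites *parent to point at _return_to_handler so that function
 * exit can be traced as well.
 */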
ENTRY(_ftrace_graph_caller)
# ifndef CONFIG_DYNAMIC_FTRACE
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
	r0 = sp;	/* unsigned long *parent */
	r1 = rets;	/* unsigned long self_addr */
# else
	r0 = sp;	/* unsigned long *parent */
	r1 = [sp];	/* unsigned long self_addr */
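	/* (in the dynamic case the call through _ftrace_call has already
	 * clobbered RETS, so self_addr is reloaded from its save slot)
	 */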
# endif
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r2 = fp;	/* unsigned long frame_pointer */
# endif
	r0 += 16;	/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* make sure original return values are saved */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;
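	/* (the Blackfin ABI returns scalar results in r0/r1 and pointer
	 * results in p0, so all three may hold the traced function's
	 * return value and must survive the C call below)
	 */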

	/* get original return address */
# ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	r0 = fp;	/* unsigned long frame_pointer */
# endif
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return; the three pops below provide
	 * exactly that spacing
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif