This source file includes following definitions.
- ftrace_nop_replace
- ftrace_call_replace
- clear_mod_flag
- ftrace_mod_code
- arch_ftrace_nmi_enter
- arch_ftrace_nmi_exit
- wait_for_nmi_and_set_mod_flag
- wait_for_nmi
- do_ftrace_mod_code
- ftrace_modify_code
- ftrace_update_ftrace_func
- ftrace_make_nop
- ftrace_make_call
- ftrace_dyn_arch_init
- ftrace_mod
- ftrace_enable_ftrace_graph_caller
- ftrace_disable_ftrace_graph_caller
- prepare_ftrace_return
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <linux/uaccess.h>
16 #include <linux/ftrace.h>
17 #include <linux/string.h>
18 #include <linux/init.h>
19 #include <linux/io.h>
20 #include <linux/kernel.h>
21 #include <asm/ftrace.h>
22 #include <asm/cacheflush.h>
23 #include <asm/unistd.h>
24 #include <trace/syscall.h>
25
26 #ifdef CONFIG_DYNAMIC_FTRACE
/* Scratch buffer holding the replacement (call-site) instruction image. */
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

/*
 * Scratch buffer holding the "nop" image built per call site by
 * ftrace_nop_replace().  NOTE(review): a 32-bit value is stored here,
 * so this presumably assumes MCOUNT_INSN_SIZE == 4 -- confirm against
 * <asm/ftrace.h>.
 */
static unsigned char ftrace_nop[4];
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Build the "nop" image for the mcount call site at @ip.
 *
 * Rather than a literal nop instruction, the value ip + MCOUNT_INSN_SIZE
 * is stored.  NOTE(review): this matches the SH scheme where the call
 * site holds an address read as data (when disabled it points just past
 * the call site) -- confirm against the arch mcount assembly stub.
 *
 * Returns a pointer to the shared static ftrace_nop buffer; the caller
 * must consume it before the next call (not reentrant).
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}
53
/*
 * Build the "call" image that makes the site at @ip invoke @addr:
 * the target address is stored into the shared ftrace_replaced_code
 * buffer (the call site reads it as data).
 *
 * Returns a pointer to the shared static buffer; the caller must
 * consume it before the next call (not reentrant).
 */
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	__raw_writel(addr, ftrace_replaced_code);
	return ftrace_replaced_code;
}
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94 #define MOD_CODE_WRITE_FLAG (1 << 31)
95 static atomic_t nmi_running = ATOMIC_INIT(0);
96 static int mod_code_status;
97 static void *mod_code_ip;
98 static void *mod_code_newcode;
99
100 static void clear_mod_flag(void)
101 {
102 int old = atomic_read(&nmi_running);
103
104 for (;;) {
105 int new = old & ~MOD_CODE_WRITE_FLAG;
106
107 if (old == new)
108 break;
109
110 old = atomic_cmpxchg(&nmi_running, old, new);
111 }
112 }
113
/*
 * Perform the instruction rewrite described by mod_code_ip /
 * mod_code_newcode.  Runs both from do_ftrace_mod_code() and, while a
 * modification is pending, from NMI context via arch_ftrace_nmi_enter(),
 * so it may rely only on the mod_code_* globals.  Several CPUs may race
 * to do the same write; they write identical bytes, so whichever wins
 * is fine.
 *
 * probe_kernel_write() reports a faulting write in mod_code_status
 * instead of oopsing.
 */
static void ftrace_mod_code(void)
{
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
		MCOUNT_INSN_SIZE);

	/* On failure, drop the flag so no further writers retry the patch. */
	if (mod_code_status)
		clear_mod_flag();
}
129
/*
 * NMI entry hook: bump the NMI refcount and, if the patcher has set
 * MOD_CODE_WRITE_FLAG, perform the pending code write from NMI context
 * so this NMI never runs through a half-modified instruction.
 */
void arch_ftrace_nmi_enter(void)
{
	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
		/* See the mod_code_* stores before using them. */
		smp_rmb();
		ftrace_mod_code();
	}
	/* Ensure the patch (if any) is visible before the NMI body runs. */
	smp_mb();
}
139
/* NMI exit hook: pair of arch_ftrace_nmi_enter(). */
void arch_ftrace_nmi_exit(void)
{
	/* Finish all executions before letting nmi_running drop. */
	smp_mb();
	atomic_dec(&nmi_running);
}
146
147 static void wait_for_nmi_and_set_mod_flag(void)
148 {
149 if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
150 return;
151
152 do {
153 cpu_relax();
154 } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
155 }
156
157 static void wait_for_nmi(void)
158 {
159 if (!atomic_read(&nmi_running))
160 return;
161
162 do {
163 cpu_relax();
164 } while (atomic_read(&nmi_running));
165 }
166
/*
 * NMI-safe rewrite of the MCOUNT_INSN_SIZE bytes at @ip with @new_code.
 *
 * Publishes the request through the mod_code_* globals, excludes/claims
 * NMIs via MOD_CODE_WRITE_FLAG, performs the write, then tears the
 * protocol down.  Returns 0 on success or the probe_kernel_write()
 * error recorded in mod_code_status.
 */
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers must be visible before any NMI may write them. */
	smp_mb();

	wait_for_nmi_and_set_mod_flag();

	/* Make sure all running NMIs have finished before we write. */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the flag. */
	smp_mb();

	clear_mod_flag();
	/* Wait out any NMI that raced in and is redoing the write. */
	wait_for_nmi();

	return mod_code_status;
}
191
192 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
193 unsigned char *new_code)
194 {
195 unsigned char replaced[MCOUNT_INSN_SIZE];
196
197
198
199
200
201
202
203
204
205
206 if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
207 return -EFAULT;
208
209
210 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
211 return -EINVAL;
212
213
214 if (do_ftrace_mod_code(ip, new_code))
215 return -EPERM;
216
217 flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
218
219 return 0;
220 }
221
222 int ftrace_update_ftrace_func(ftrace_func_t func)
223 {
224 unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
225 unsigned char old[MCOUNT_INSN_SIZE], *new;
226
227 memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
228 new = ftrace_call_replace(ip, (unsigned long)func);
229
230 return ftrace_modify_code(ip, old, new);
231 }
232
233 int ftrace_make_nop(struct module *mod,
234 struct dyn_ftrace *rec, unsigned long addr)
235 {
236 unsigned char *new, *old;
237 unsigned long ip = rec->ip;
238
239 old = ftrace_call_replace(ip, addr);
240 new = ftrace_nop_replace(ip);
241
242 return ftrace_modify_code(rec->ip, old, new);
243 }
244
245 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
246 {
247 unsigned char *new, *old;
248 unsigned long ip = rec->ip;
249
250 old = ftrace_nop_replace(ip);
251 new = ftrace_call_replace(ip, addr);
252
253 return ftrace_modify_code(rec->ip, old, new);
254 }
255
/* No arch-specific setup is needed for dynamic ftrace on this arch. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
260 #endif
261
262 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
263 #ifdef CONFIG_DYNAMIC_FTRACE
264 extern void ftrace_graph_call(void);
265
/*
 * Replace the 32-bit word at @ip: verify it currently reads @old_addr,
 * then store @new_addr.
 *
 * Returns 0 on success, -EFAULT if @ip is unreadable, -EINVAL if the
 * current value does not match @old_addr.
 *
 * NOTE(review): unlike ftrace_modify_code() this writes directly (no
 * NMI protocol, no icache flush) -- presumably safe because the patched
 * word is read as data by the graph-caller stub, not executed; confirm
 * against the ftrace_graph_call assembly.
 */
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}
280
281 int ftrace_enable_ftrace_graph_caller(void)
282 {
283 unsigned long ip, old_addr, new_addr;
284
285 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
286 old_addr = (unsigned long)(&skip_trace);
287 new_addr = (unsigned long)(&ftrace_graph_caller);
288
289 return ftrace_mod(ip, old_addr, new_addr);
290 }
291
292 int ftrace_disable_ftrace_graph_caller(void)
293 {
294 unsigned long ip, old_addr, new_addr;
295
296 ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
297 old_addr = (unsigned long)(&ftrace_graph_caller);
298 new_addr = (unsigned long)(&skip_trace);
299
300 return ftrace_mod(ip, old_addr, new_addr);
301 }
302 #endif
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
322 {
323 unsigned long old;
324 int faulted;
325 unsigned long return_hooker = (unsigned long)&return_to_handler;
326
327 if (unlikely(ftrace_graph_is_dead()))
328 return;
329
330 if (unlikely(atomic_read(¤t->tracing_graph_pause)))
331 return;
332
333
334
335
336
337
338 __asm__ __volatile__(
339 "1: \n\t"
340 "mov.l @%2, %0 \n\t"
341 "2: \n\t"
342 "mov.l %3, @%2 \n\t"
343 "mov #0, %1 \n\t"
344 "3: \n\t"
345 ".section .fixup, \"ax\" \n\t"
346 "4: \n\t"
347 "mov.l 5f, %0 \n\t"
348 "jmp @%0 \n\t"
349 " mov #1, %1 \n\t"
350 ".balign 4 \n\t"
351 "5: .long 3b \n\t"
352 ".previous \n\t"
353 ".section __ex_table,\"a\" \n\t"
354 ".long 1b, 4b \n\t"
355 ".long 2b, 4b \n\t"
356 ".previous \n\t"
357 : "=&r" (old), "=r" (faulted)
358 : "r" (parent), "r" (return_hooker)
359 );
360
361 if (unlikely(faulted)) {
362 ftrace_graph_stop();
363 WARN_ON(1);
364 return;
365 }
366
367 if (function_graph_enter(old, self_addr, 0, NULL))
368 __raw_writel(old, parent);
369 }
370 #endif