/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * If gcc's hotpatch feature is used, the original as well as the disabled
 * function prologue consists of a single six byte instruction and looks
 * like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched as above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */
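
/*
 * The nop and call patterns used below are generated by small helpers
 * from asm/ftrace.h. A minimal sketch of what they are expected to look
 * like (the exact definitions, including the ftrace_plt handling for
 * module addresses, live in asm/ftrace.h):
 *
 *	struct ftrace_insn {
 *		u16 opc;
 *		s32 disp;
 *	} __packed;
 *
 *	static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
 *	{
 *		insn->opc = 0xc0f4;			# jg
 *		insn->disp = MCOUNT_INSN_SIZE / 2;	# skip the block
 *	}
 *
 *	static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
 *						     unsigned long ip)
 *	{
 *		insn->opc = 0xc005;			# brasl %r0,...
 *		insn->disp = (FTRACE_ADDR - ip) / 2;
 *	}
 *
 * Relative branch displacements on s390 are counted in halfwords, hence
 * the division by two.
 */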

unsigned long ftrace_plt;

static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}
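
/*
 * Byte for byte, the two possible original patterns generated above are:
 *	brcl	0,0		-> c0 04 00 00 00 00
 *	stg	%r14,8(%r15)	-> e3 e0 f0 08 00 24
 * The opc/disp pair of struct ftrace_insn is a plain two/four byte split
 * of the pattern and does not follow the instruction format's field
 * boundaries.
 */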

static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}

static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}

static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
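
/*
 * KPROBE_ON_FTRACE_NOP and KPROBE_ON_FTRACE_CALL are marker values
 * (from asm/ftrace.h), not executable code: they sit in the four bytes
 * behind the two byte breakpoint opcode, and the kprobe handler inspects
 * them to decide whether to emulate the nop or the brasl once the
 * breakpoint fires.
 */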

/*
 * All traced functions branch to the common ftrace_caller, so there is
 * no per-callsite branch target to rewrite when the tracer changes;
 * returning 0 is sufficient.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

/*
 * ftrace_caller picks up the current tracer callback at run time (see
 * mcount.S), so there is no branch target to patch when the callback
 * changes.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
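
/*
 * The ftrace_plt trampoline built below decodes as follows (byte
 * offsets on the left):
 *	 0:	basr	%r1,0		# r1 = address of the lg
 *	 2:	lg	%r1,10(%r1)	# load FTRACE_ADDR from offset 12
 *	 8:	br	%r1		# branch to ftrace_caller
 *	10:	(two bytes padding)
 *	12:	FTRACE_ADDR		# eight byte literal
 * module_alloc() places the trampoline in the module area, so it stays
 * within brasl range of module code even if the kernel image itself
 * is not.
 */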
static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
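
/*
 * On function return the patched-in return_to_handler (mcount.S) calls
 * the generic ftrace_return_to_handler() to pop the original return
 * address off the return stack and branches back to the real caller.
 */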

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is branch relative on condition. To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero and turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
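
/*
 * A worked example of the patched byte: brc is encoded as a7 m4 xxxx
 * with the condition mask in the high nibble of the second byte, so
 * writing 0x04 (mask 0) yields "brc 0,..." which never branches, while
 * 0xf4 (mask 15) yields "brc 15,..." which always branches.
 */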
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */