/*
 * Kernel probes (kprobes) for SuperH
 *
 * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
 * Copyright (C) 2006 Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

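/*
 * State for software single-step: saved_current_opcode records the address
 * of the probed instruction while it is being stepped over, and
 * saved_next_opcode/saved_next_opcode2 hold the temporary breakpoints
 * planted on its possible successor instructions.
 */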
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);

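/*
 * Predicates over the 16-bit SH opcodes, matching the fixed bits of each
 * encoding; e.g. JMP @Rn is 0x402b with Rn in bits 11:8, and BRA carries
 * a 12-bit signed displacement in its low bits.
 */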
#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)

#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)

#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)

#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);

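	/*
	 * RTE reloads PC and SR from SPC and SSR, so its successor cannot
	 * be determined from pt_regs; refuse to plant a probe on it.
	 */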
	if (OPCODE_RTE(opcode))
		return -EFAULT;	/* Bad breakpoint */

	p->opcode = opcode;

	return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

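/*
 * Arming/disarming swaps the probed opcode with the breakpoint
 * instruction; the icache flush keeps instruction fetch coherent with
 * the rewritten text.
 */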
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (*p->addr == BREAKPOINT_INSTRUCTION)
		return 1;

	return 0;
}

/*
 * If an illegal slot instruction exception occurs at an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk(KERN_WARNING "removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	struct kprobe *saved = this_cpu_ptr(&saved_next_opcode);

	if (saved->addr) {
		arch_disarm_kprobe(p);
		arch_disarm_kprobe(saved);

		saved->addr = NULL;
		saved->opcode = 0;

		saved = this_cpu_ptr(&saved_next_opcode2);
		if (saved->addr) {
			arch_disarm_kprobe(saved);

			saved->addr = NULL;
			saved->opcode = 0;
		}
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Single-stepping is implemented by removing the breakpoint at the current
 * kprobe and planting one on the next instruction, following branches; two
 * probes are planted when the branch is conditional, one per possible
 * successor.
 */
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	__this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);

	if (p != NULL) {
		struct kprobe *op1, *op2;

		arch_disarm_kprobe(p);

		op1 = this_cpu_ptr(&saved_next_opcode);
		op2 = this_cpu_ptr(&saved_next_opcode2);

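		/*
		 * Decode the probed instruction and plant a breakpoint on
		 * every possible successor; PC-relative targets on SH are
		 * computed from the branch address + 4.
		 */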
		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			/* JMP @Rn / JSR @Rn: target is taken from Rn */
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
			/* BRA/BSR: the 12-bit displacement is signed */
			unsigned long disp = (p->opcode & 0x0FFF);
			if (disp & 0x800)
				disp |= ~0xFFFUL;
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);

		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
			/* BRAF/BSRF: target is Rn-relative */
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 +
						 regs->regs[reg_nr]);

		} else if (OPCODE_RTS(p->opcode)) {
			/* RTS: return through the procedure register */
			op1->addr = (kprobe_opcode_t *) regs->pr;

		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
			/* BF/BT: the 8-bit displacement is signed */
			unsigned long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp |= ~0xFFUL;
			/* case 1: branch not taken, fall through */
			op1->addr = p->addr + 1;
			/* case 2: branch taken */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
			/* BF/S, BT/S: as above, but the fall-through
			 * path must skip the delay slot */
			unsigned long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp |= ~0xFFUL;
			/* case 1: branch not taken */
			op1->addr = p->addr + 2;
			/* case 2: branch taken */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else {
			/* straight-line code: probe the following slot */
			op1->addr = p->addr + 1;
		}

		op1->opcode = *(op1->addr);
		arch_arm_kprobe(op1);
	}
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->pr;

	/* Replace the return addr with trampoline addr */
	regs->pr = (unsigned long)kretprobe_trampoline;
}

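/*
 * Breakpoint-trap entry point: look up the kprobe for the faulting pc,
 * run its pre-handler and start the single-step, taking care of probes
 * hit recursively from within a handler. Returns 1 if the trap was ours.
 */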
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (regs->pc);

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				goto no_kprobe;
			}
			/* We have re-entered kprobe_handler(): another probe
			 * was hit while within a handler. Save the original
			 * kprobe state and single-step the new probe's
			 * instruction without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		/* Not one of ours: let the kernel handle it */
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler. The
 * holder function below is never called; it exists only to emit the
 * kretprobe_trampoline symbol.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (".globl kretprobe_trampoline\n"
		      "kretprobe_trampoline:\n\t"
		      "nop\n");
}

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->pc = orig_ret_address;
	kretprobe_hash_unlock(current, &flags);

	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return orig_ret_address;
}

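/*
 * Runs on the single-step breakpoint: calls the post-handler, removes
 * the temporary probe(s) planted by prepare_singlestep() and re-arms
 * the breakpoint at the original probe address.
 */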
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kprobe_opcode_t *addr = NULL;
	struct kprobe *p = NULL;

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	p = this_cpu_ptr(&saved_next_opcode);
	if (p->addr) {
		arch_disarm_kprobe(p);
		p->addr = NULL;
		p->opcode = 0;

		addr = __this_cpu_read(saved_current_opcode.addr);
		__this_cpu_write(saved_current_opcode.addr, NULL);

		p = get_kprobe(addr);
		arch_arm_kprobe(p);

		p = this_cpu_ptr(&saved_next_opcode2);
		if (p->addr) {
			arch_disarm_kprobe(p);
			p->addr = NULL;
			p->opcode = 0;
		}
	}

	/* Restore the original saved kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();

	return 1;
}

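/*
 * Invoked when a fault is taken while single-stepping or while a kprobe
 * handler is running; returns 1 if kprobes recovered from the fault,
 * 0 to let the normal fault handling continue.
 */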
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the pc back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre-/npostfault counts could likewise be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault. This can happen when a
		 * handler accesses user space via copy_from_user(),
		 * get_user(), etc. Let the user-specified fault handler
		 * try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			regs->pc = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it; let
		 * do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct kprobe *p = NULL;
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (args->regs->pc);
	if (val == DIE_TRAP) {
		if (!kprobe_running()) {
			if (kprobe_handler(args->regs)) {
				ret = NOTIFY_STOP;
			} else {
				/* Not a kprobe trap */
				ret = NOTIFY_DONE;
			}
		} else {
			p = get_kprobe(addr);
			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
			    (kcb->kprobe_status == KPROBE_REENTER)) {
				if (post_kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			} else {
				if (kprobe_handler(args->regs)) {
					ret = NOTIFY_STOP;
				} else {
					p = __this_cpu_read(current_kprobe);
					if (p->break_handler &&
					    p->break_handler(p, args->regs))
						ret = NOTIFY_STOP;
				}
			}
		}
	}

	return ret;
}

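/*
 * jprobes: the pre-handler saves the register state plus enough stack to
 * cover the argument area, then diverts execution to the jprobe entry;
 * jprobe_return() traps back so longjmp_break_handler() can restore the
 * saved context.
 */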
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_r15 = regs->regs[15];
	addr = kcb->jprobe_saved_r15;

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = kcb->jprobe_saved_r15;
	u8 *addr = (u8 *)regs->pc;

	if ((addr >= (u8 *)jprobe_return) &&
	    (addr <= (u8 *)jprobe_return_end)) {
		*regs = kcb->jprobe_saved_regs;

		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));

		kcb->kprobe_status = KPROBE_HIT_SS;
		preempt_enable_no_resched();
		return 1;
	}

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}