/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoint currently in use by each breakpoint address
 * register, for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

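	/*
	 * The UBC interface clock is gated while no channels are in use;
	 * take a reference on it for as long as this breakpoint remains
	 * installed.
	 */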
	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

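	/*
	 * Drop our reference on the interface clock; it is gated off
	 * again once no breakpoints remain installed.
	 */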
	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

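/*
 * Translate an encoded breakpoint length into its size in bytes.
 */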
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

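/*
 * Convert the arch-specific breakpoint length and type encodings into
 * their generic hw_breakpoint equivalents.
 */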
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

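/*
 * Build the arch-specific breakpoint info from the generic perf event
 * attributes (the inverse of arch_bp_generic_fields()).
 */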
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel addresses, either the address or the symbol name can
	 * be specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can
		 * only happen from a call_rcu() path, so we can safely
		 * fetch the breakpoint, use its callback and touch its
		 * counter while inside an rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL if the perf counter was concurrently
		 * removed.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

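/*
 * Trap handler for the breakpoint vector; simply hands the exception
 * off to the die notifier chain.
 */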
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

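/*
 * Register CPU-specific UBC support, replacing the early boot dummy.
 * Called once during boot by the CPU setup code when it knows which
 * UBC variant is present, along the lines of this hypothetical sketch
 * (names and values here are illustrative only; the real callbacks and
 * trap vector are defined by the CPU-specific UBC driver):
 *
 *	static struct sh_ubc my_ubc = {
 *		.name		= "my-ubc",
 *		.num_events	= 2,
 *		.trap_nr	= ...,
 *		.enable		= my_ubc_enable,
 *		.disable	= my_ubc_disable,
 *		...
 *	};
 *
 *	register_sh_ubc(&my_ubc);
 */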
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}