/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *              <anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

extern void jprobe_inst_return(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

enum instruction_type {A, I, M, F, B, L, X, u};
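/*
 * An IA-64 bundle is 128 bits: a 5-bit template followed by three 41-bit
 * instruction slots.  The template selects which execution-unit type
 * (M, I, F, B, or the L+X pair, which spans slots 1 and 2) each slot is
 * dispatched to.  The table below maps the 32 template values to their
 * slot types; 'u' marks templates that are architecturally undefined.
 */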
static enum instruction_type bundle_encoding[32][3] = {
  { M, I, I },				/* 00 */
  { M, I, I },				/* 01 */
  { M, I, I },				/* 02 */
  { M, I, I },				/* 03 */
  { M, L, X },				/* 04 */
  { M, L, X },				/* 05 */
  { u, u, u },				/* 06 */
  { u, u, u },				/* 07 */
  { M, M, I },				/* 08 */
  { M, M, I },				/* 09 */
  { M, M, I },				/* 0A */
  { M, M, I },				/* 0B */
  { M, F, I },				/* 0C */
  { M, F, I },				/* 0D */
  { M, M, F },				/* 0E */
  { M, M, F },				/* 0F */
  { M, I, B },				/* 10 */
  { M, I, B },				/* 11 */
  { M, B, B },				/* 12 */
  { M, B, B },				/* 13 */
  { u, u, u },				/* 14 */
  { u, u, u },				/* 15 */
  { B, B, B },				/* 16 */
  { B, B, B },				/* 17 */
  { M, M, B },				/* 18 */
  { M, M, B },				/* 19 */
  { u, u, u },				/* 1A */
  { u, u, u },				/* 1B */
  { M, F, B },				/* 1C */
  { M, F, B },				/* 1D */
  { u, u, u },				/* 1E */
  { u, u, u },				/* 1F */
};

/* Insert a long branch (brl) instruction */
static void __kprobes set_brl_inst(void *from, void *to)
{
	s64 rel = ((s64) to - (s64) from) >> 4;
	bundle_t *brl;
	brl = (bundle_t *) ((u64) from & ~0xf);
	brl->quad0.template = 0x05;	/* [MLX](stop) */
	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
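	/*
	 * The 41-bit slot 1 straddles the two 64-bit words of the bundle:
	 * its low 18 bits sit in bits 46..63 of quad0 and its upper 23
	 * bits in bits 0..22 of quad1, hence the split store and the
	 * shift by (64 - 46).
	 */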
	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
}

/*
 * Check whether the instruction is a break or an IP-relative
 * branch/call and update the kprobe inst_flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;
	/* Check for a break instruction:
	 * bits 37:40 (major opcode) must be zero,
	 * bits 27:32 (x6) must be zero,
	 * bits 33:35 (x3) must be zero.
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		  case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		  case IP_RELATIVE_PREDICT_OPCODE:
		  case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		  case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		  case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}

/*
 * Check whether the instruction on which we are inserting the kprobe
 * is a compare instruction
 *	(qp) cmpx.crel.ctype p1,p2=r2,r3
 * with ctype unc.  An unconditional compare writes its target
 * predicates even when its qualifying predicate is 0, which is why
 * unsupported_inst() must not let the break inherit the original qp.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
		(bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
		(major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
				&& (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}

/*
 * Check whether the instruction on which we are inserting the kprobe
 * is supported.
 * Returns the qp value to be used for the break instruction if
 * supported, -EINVAL otherwise.
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

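	/* The low 6 bits of every IA-64 instruction name the qualifying
	 * predicate register; the instruction only takes effect when
	 * that predicate is 1 (pr0 is hardwired to 1).
	 */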
	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	}
	else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - bits 33-35 must be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
						addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - bits 27-35 must be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
						addr);
				return -EINVAL;
			}
		}
		else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
				(kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions (tbit, tnat, tf):
			 * bits 33-36 must be 0 and bit 12 must be 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot 1 at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major opcode is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		}
		else if (major_opcode == 2) {
			/* Indirect Predict, major opcode is 2:
			 * bits 27-32 must be equal to 0x10 or 0x11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use floating-point instructions; for safety,
	 * kprobe still checks whether this is an fcmp/fclass/float
	 * approximation instruction.
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
				(kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
			(kprobe_inst & (0x1UL << 33))) {
			/* float approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float Approx "
					"instr at <0x%lx> is not supported\n",
						addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}

/*
 * Replace the instruction at the given slot with a break instruction,
 * preserving the original qualifying predicate.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate (qp)
	 * to the break instruction
	 */
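	/*
	 * qp occupies bits 0..5 of the instruction; carrying it over
	 * makes the break fire only when the original instruction would
	 * have executed.  For the unc-type instructions handled in
	 * unsupported_inst(), qp has already been forced to 0 so the
	 * break always fires.
	 */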
	break_inst |= qp;

	switch (slot) {
	  case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	  case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	  case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single-step on the original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}

static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	  case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	  case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	  case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_read(&kcb->prev_kprobe_index);
	__this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
	atomic_sub(1, &kcb->prev_kprobe_index);
}

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

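/*
 * On ia64 a C-level function pointer is a function descriptor
 * (struct fnptr: entry ip plus gp), not a code address.  The empty
 * function below exists only so that its descriptor's ->ip can be
 * planted as a fake return address; kretprobes then recognizes
 * returns into that address.
 */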
static void kretprobe_trampoline(void)
{
}

/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->cr_iip = orig_ret_address;

	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
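	/* On ia64, br.call deposits the return link in b0, so that is
	 * the return address we save and then redirect to the
	 * trampoline.
	 */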
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}

/* Check whether the instruction in the given slot is a break */
static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
{
	unsigned int major_opcode;
	unsigned int template = bundle->quad0.template;
	unsigned long kprobe_inst;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get the instruction at the given slot */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	/* For a break instruction,
	 * bits 37:40 (major opcode) must be zero,
	 * bits 27:32 (x6) must be zero,
	 * bits 33:35 (x3) must be zero.
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}

/*
 * Check whether the target bundle modifies the IP or can raise an
 * exception.  If so, it cannot be boosted.
 */
static int __kprobes can_boost(bundle_t *bundle, uint slot,
			       unsigned long bundle_addr)
{
	unsigned int template = bundle->quad0.template;

	do {
		if (search_exception_tables(bundle_addr + slot) ||
		    __is_ia64_break_inst(bundle, slot))
			return 0;	/* exception may occur in this bundle */
	} while ((++slot) < 3);
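	/*
	 * Templates come in even/odd pairs that differ only in the
	 * trailing stop bit (template bit 0), so mask it off before
	 * classifying the bundle.
	 */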
	template &= 0x1e;
	if (template >= 0x10 /* including B unit */ ||
	    template == 0x04 /* including X unit */ ||
	    template == 0x06) /* undefined */
		return 0;

	return 1;
}

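/*
 * Booster layout, as arranged by prepare_booster(): p->ainsn.insn[0]
 * holds the copied original bundle and p->ainsn.insn[1] a brl that
 * branches straight back to the bundle following the probed one.  A
 * boosted hit executes the copy and falls through into the brl,
 * avoiding the single-step trap entirely.
 */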
/* Prepare long-jump bundle and disable other boosters if needed */
static void __kprobes prepare_booster(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
	unsigned int slot = (unsigned long)p->addr & 0xf;
	struct kprobe *other_kp;

	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
	}

	/* disable boosters in previous slots */
	for (; addr < (unsigned long)p->addr; addr++) {
		other_kp = get_kprobe((void *)addr);
		if (other_kp)
			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
	}
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	prepare_booster(p);

	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);

	switch (p->ainsn.slot) {
		case 0:
			dest->quad0.slot0 = src->quad0.slot0;
			break;
		case 1:
			dest->quad1.slot1_p1 = src->quad1.slot1_p1;
			break;
		case 2:
			dest->quad1.slot2 = src->quad1.slot2;
			break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
		case 0:
			dest->quad0.slot0 = src->quad0.slot0;
			break;
		case 1:
			dest->quad1.slot1_p1 = src->quad1.slot1_p1;
			break;
		case 2:
			dest->quad1.slot2 = src->quad1.slot2;
			break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn,
			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
		p->ainsn.insn = NULL;
	}
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
 * the ip to point back to the original probed address, and handle the
 * cases where the relative IP address and/or a branch register must be
 * fixed up.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix the target branch register; software
			 * convention is to use b0, b6 or b7, so only
			 * those registers are checked.
			 */
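			/*
			 * If the copied branch was taken as a call, the
			 * link register now points into the copied bundle
			 * area: either the copy itself or the bundle right
			 * after it (bundle_addr + 0x10).  Rebase such a
			 * value onto the original code so the eventual
			 * return lands in the right place.
			 */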
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
					(regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
					(regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
					(regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}

static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* Turn on single stepping; the resulting Single Step trap is
	 * handled by post_kprobes_handler().
	 */
	ia64_psr(regs)->ss = 1;
}

static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));

	return __is_ia64_break_inst(&bundle, slot);
}

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			     (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We save the original kprobes variables here and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return.  This is used for both the jprobe pre-handler
		 * and the kretprobe trampoline
		 */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT)
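	/*
	 * Boosting is not safe with preemption: a preempted task's IP
	 * could still point into the copied instruction slot when that
	 * slot is freed, so the boost path is compiled out on
	 * preemptible kernels.
	 */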
	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		ia64_psr(regs)->ri = p->ainsn.slot;
		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
		/* turn single stepping off */
		ia64_psr(regs)->ss = 0;

		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the instruction pointer back to the
		 * probe address, and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could likewise be used
		 * to account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is the break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == __IA64_BREAK_JPROBE
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is the vector number from ia64_fault();
		 * 36 is the Single Step trap
		 */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long*)&lp->bsp);
			unw_get_cfm(info, (unsigned long*)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
	return;
}

unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct fnptr *)entry)->ip;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = arch_deref_entry_point(jp->entry);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct param_bsp_cfm pa;
	int bytes;

	/*
	 * The callee owns the argument space and could overwrite it, e.g.
	 * through tail call optimization. So to be absolutely safe
	 * we save the argument space before transferring control
	 * to the instrumented jprobe function, which runs in
	 * process context.
	 */
	pa.ip = regs->cr_iip;
	unw_init_running(ia64_get_bsp_cfm, &pa);
	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
				- (char *)pa.bsp;
	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
	kcb->bsp = pa.bsp;
	kcb->cfm = pa.cfm;

	/* save architectural state */
	kcb->jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}

/* ia64 does not need this */
void __kprobes jprobe_return(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int bytes;

	/* restore architectural state */
	*regs = kcb->jprobe_saved_regs;

	/* restore the original argument space */
	flush_register_stack();
	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
				- (char *)kcb->bsp;
	memcpy(kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
	invalidate_stacked_regs();

	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}