This source file includes the following definitions:
- patch_text
- arch_prepare_ss_slot
- arch_prepare_simulate
- arch_simulate_insn
- arch_prepare_kprobe
- alloc_insn_page
- arch_arm_kprobe
- arch_disarm_kprobe
- arch_remove_kprobe
- save_previous_kprobe
- restore_previous_kprobe
- set_current_kprobe
- kprobes_save_local_irqflag
- kprobes_restore_local_irqflag
- set_ss_context
- clear_ss_context
- setup_singlestep
- reenter_kprobe
- post_kprobe_handler
- kprobe_fault_handler
- kprobe_handler
- kprobe_ss_hit
- kprobe_single_step_handler
- kprobe_breakpoint_handler
- arch_populate_kprobe_blacklist
- trampoline_probe_handler
- arch_prepare_kretprobe
- arch_trampoline_kprobe
- arch_init_kprobes
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

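/* Patch a single instruction at addr via the arm64 insn patching helper. */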
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = addr;
	insns[0] = opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

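/*
 * Copy the probed instruction into its out-of-line single-step slot and
 * record the address execution should resume at after stepping it.
 */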
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	patch_text(p->ainsn.api.insn, p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * After single-stepping the slot, the PC must be restored to the
	 * instruction following the probed one.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is simulated, so there is nothing to restore. */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}

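/*
 * Validate and decode the probed instruction: reject unaligned addresses and
 * addresses covered by the exception tables, then set up either an
 * out-of-line slot (INSN_GOOD) or simulation (INSN_GOOD_NO_SLOT).
 */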
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

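/*
 * Allocate an executable page for instruction slots, mark it read-only and
 * request that its permissions be reset when the mapping is freed.
 */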
void *alloc_insn_page(void)
{
	void *page;

	page = vmalloc_exec(PAGE_SIZE);
	if (page) {
		set_memory_ro((unsigned long)page, 1);
		set_vm_flush_reset_perms(page);
	}

	return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

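/*
 * Bookkeeping for reentrant probes: stash the currently running kprobe so it
 * can be restored once the nested one has been handled.
 */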
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

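/*
 * Interrupts are masked while single-stepping the out-of-line slot so that no
 * interrupt can be taken between the exception return and the step itself;
 * PSTATE.D is unmasked so the software step exception can be delivered.
 */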
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= PSR_I_BIT;
	/* Unmask PSTATE.D for enabling software step exceptions. */
	regs->pstate &= ~PSR_D_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

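/*
 * Enter single-step: either step the copied instruction out of line using
 * hardware single-step, or simulate it directly when no slot is used.
 */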
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

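/*
 * A breakpoint was hit while another kprobe is active: count the miss and
 * single-step the new probe, or BUG() on an unrecoverable recursion.
 */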
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

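/*
 * Called once the probed instruction has been executed or simulated: fix up
 * the PC, run the post handler and clear the active kprobe.
 */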
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore of non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post handler runs with this kprobe still marked
		 * active; it may itself hit another breakpoint.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

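/*
 * A fault was taken while a kprobe was active; unwind the single-step state
 * or give the user fault handler and the exception tables a chance to fix
 * things up.
 */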
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting of
		 * faults taken while a probe handler was running.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified fault handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up via the exception tables.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

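/*
 * Breakpoint exception entry point: look up the kprobe registered for the
 * faulting address and run its pre handler before single-stepping.
 */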
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and there is no need
			 * for single stepping: reset the current kprobe
			 * and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	}

	/*
	 * The breakpoint instruction was removed right after we hit it.
	 * Another cpu has removed either a probepoint or a debugger
	 * breakpoint at this address. In either case, no further handling
	 * of this interrupt is appropriate. Return back to the original
	 * instruction and continue.
	 */
}

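/* Check whether a pending single-step matches the address we stopped at. */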
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		/* single step complete, run the post handlers */
		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

static struct step_hook kprobes_step_hook = {
	.fn = kprobe_single_step_handler,
};

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};

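/*
 * Kernel text regions which must never be probed (exception entry, irq
 * entry, idmap and hypervisor text) are added to the kprobes blacklist here.
 */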
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
					(unsigned long)__exception_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
					(unsigned long)__idmap_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

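/*
 * Called via kretprobe_trampoline when a probed function returns: recover the
 * original return address and run the registered return handlers.
 */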
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

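/* Save the real return address and divert the call to the trampoline. */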
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_step_hook(&kprobes_step_hook);

	return 0;
}