This source file includes the following definitions.
- kmmio_page_list
- get_kmmio_probe
- get_kmmio_fault_page
- clear_pmd_presence
- clear_pte_presence
- clear_page_presence
- arm_kmmio_fault_page
- disarm_kmmio_fault_page
- kmmio_handler
- post_kmmio_handler
- add_kmmio_fault_page
- release_kmmio_fault_page
- register_kmmio_probe
- rcu_free_kmmio_fault_pages
- remove_kmmio_fault_pages
- unregister_kmmio_probe
- kmmio_die_notifier
- kmmio_init
- kmmio_cleanup
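/*
 * Support for MMIO probes (used by mmiotrace): pages covering traced
 * MMIO areas are marked not-present so that accesses fault. The fault
 * handler calls the probe's pre_handler, restores the page and
 * single-steps the faulting instruction; the debug-trap handler then
 * calls the post_handler and re-arms the page.
 */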
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long addr;             /* address of the armed page */
        pteval_t old_presence;          /* page presence prior to arming */
        bool armed;

        /*
         * Number of times this page has been registered as part of a
         * probe. If zero, the page is disarmed and may be freed.
         * Protected by kmmio_lock once linked into kmmio_page_table.
         */
        int count;

        bool scheduled_for_release;
};

struct kmmio_delayed_release {
        struct rcu_head rcu;
        struct kmmio_fault_page *release_list;
};

struct kmmio_context {
        struct kmmio_fault_page *fpage;
        struct kmmio_probe *probe;
        unsigned long saved_flags;
        unsigned long addr;
        int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

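/*
 * Return the hash bucket in kmmio_page_table for the page containing
 * @addr, or NULL if no page table entry exists for the address.
 */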
static struct list_head *kmmio_page_list(unsigned long addr)
{
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);

        return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

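/*
 * Find the probe covering @addr, if any. The caller must hold the RCU
 * read lock (kmmio_probes is traversed with list_for_each_entry_rcu).
 */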
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
        struct kmmio_probe *p;
        list_for_each_entry_rcu(p, &kmmio_probes, list) {
                if (addr >= p->addr && addr < (p->addr + p->len))
                        return p;
        }
        return NULL;
}

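/*
 * Find the kmmio_fault_page for the page containing @addr, if any.
 * The caller must hold the RCU read lock.
 */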
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
        struct list_head *head;
        struct kmmio_fault_page *f;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);

        if (!pte)
                return NULL;
        addr &= page_level_mask(l);
        head = kmmio_page_list(addr);
        list_for_each_entry_rcu(f, head, list) {
                if (f->addr == addr)
                        return f;
        }
        return NULL;
}

static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
        pmd_t new_pmd;
        pmdval_t v = pmd_val(*pmd);
        if (clear) {
                *old = v;
                new_pmd = pmd_mknotpresent(*pmd);
        } else {
                /* Presume this has been called with clear==true before. */
                new_pmd = __pmd(*old);
        }
        set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
        pteval_t v = pte_val(*pte);
        if (clear) {
                *old = v;
                /* The address argument is unused on x86, hence 0. */
                pte_clear(&init_mm, 0, pte);
        } else {
                /* Presume this has been called with clear==true before. */
                set_pte_atomic(pte, __pte(*old));
        }
}

static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
        unsigned int level;
        pte_t *pte = lookup_address(f->addr, &level);

        if (!pte) {
                pr_err("no pte for addr 0x%08lx\n", f->addr);
                return -1;
        }

        switch (level) {
        case PG_LEVEL_2M:
                clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
                break;
        case PG_LEVEL_4K:
                clear_pte_presence(pte, clear, &f->old_presence);
                break;
        default:
                pr_err("unexpected page level 0x%x.\n", level);
                return -1;
        }

        __flush_tlb_one_kernel(f->addr);
        return 0;
}

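/*
 * Mark the given page as not present so that accesses to it fault.
 * The caller must ensure the kmmio_fault_page cannot be freed or
 * reused concurrently (RCU read lock and/or kmmio_lock).
 * Returns 0 on success, -1 on failure.
 */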
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret;
        WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
        if (f->armed) {
                pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
                           f->addr, f->count, !!f->old_presence);
        }
        ret = clear_page_presence(f, true);
        WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
                  f->addr);
        f->armed = true;
        return ret;
}

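/*
 * Restore the original page presence so that accesses no longer fault.
 * Same protection requirements as arm_kmmio_fault_page().
 */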
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
        int ret = clear_page_presence(f, false);
        WARN_ONCE(ret < 0,
                  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
        f->armed = false;
}

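/*
 * Page-fault entry point, called for addresses that may belong to an
 * armed kmmio page. Runs the probe's pre_handler, disarms the page and
 * sets TF so the faulting instruction is single-stepped with interrupts
 * disabled; post_kmmio_handler() finishes the job on the following
 * debug trap.
 *
 * Returns 1 if the fault was handled (the caller must not process it
 * further), 0 if it was not a kmmio fault, and -EINVAL if no page table
 * entry exists for the address.
 */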
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
        struct kmmio_context *ctx;
        struct kmmio_fault_page *faultpage;
        int ret = 0; /* default: fault not handled by kmmio */
        unsigned long page_base = addr;
        unsigned int l;
        pte_t *pte = lookup_address(addr, &l);
        if (!pte)
                return -EINVAL;
        page_base &= page_level_mask(l);

        /*
         * Preemption stays disabled for the whole fault + single-step
         * sequence: only one kmmio trace can be active per CPU, and the
         * pre/post handlers rely on running with preemption off.
         */
        preempt_disable();
        rcu_read_lock();

        faultpage = get_kmmio_fault_page(page_base);
        if (!faultpage) {
                /* Not a kmmio fault; let the normal fault path handle it. */
                goto no_kmmio;
        }

        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
                if (page_base == ctx->addr) {
                        /*
                         * A second fault on the same page while stepping
                         * usually means some other condition that
                         * do_page_fault() must handle.
                         */
                        pr_debug("secondary hit for 0x%08lx CPU %d.\n",
                                 addr, smp_processor_id());

                        if (!faultpage->old_presence)
                                pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
                                        addr, smp_processor_id());
                } else {
                        /*
                         * A hit on a different page would overwrite the
                         * in-flight context; disarm the page and ignore
                         * the event to avoid worse damage.
                         */
                        pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
                                 smp_processor_id(), addr);
                        pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
                        disarm_kmmio_fault_page(faultpage);
                }
                goto no_kmmio_ctx;
        }
        ctx->active++;

        ctx->fpage = faultpage;
        ctx->probe = get_kmmio_probe(page_base);
        ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        ctx->addr = page_base;

        if (ctx->probe && ctx->probe->pre_handler)
                ctx->probe->pre_handler(ctx->probe, regs, addr);

        /*
         * Enable single-stepping and keep interrupts off while stepping
         * over the faulting instruction.
         */
        regs->flags |= X86_EFLAGS_TF;
        regs->flags &= ~X86_EFLAGS_IF;

        /* Make the page present again and single-step. */
        disarm_kmmio_fault_page(ctx->fpage);

        /*
         * rcu_read_lock() and preempt_disable() are left held on purpose;
         * post_kmmio_handler() releases them after the step completes.
         */
        put_cpu_var(kmmio_ctx);
        return 1; /* fault handled */

no_kmmio_ctx:
        put_cpu_var(kmmio_ctx);
no_kmmio:
        rcu_read_unlock();
        preempt_enable_no_resched();
        return ret;
}

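/*
 * Debug-trap (single-step) handler, the counterpart of kmmio_handler().
 * Runs the probe's post_handler, re-arms the page if it is still in use,
 * restores the saved TF/IF flags and drops the RCU read lock and
 * preemption count taken in kmmio_handler().
 *
 * Returns 1 if the trap was consumed by kmmio, 0 otherwise.
 */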
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
        int ret = 0;
        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

        if (!ctx->active) {
                /*
                 * A debug trap without an active context comes from some
                 * other source (e.g. a debugger); it is not ours to handle.
                 */
                pr_warning("unexpected debug trap on CPU %d.\n",
                           smp_processor_id());
                goto out;
        }

        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);

        /* Prevent racing against release_kmmio_fault_page(). */
        spin_lock(&kmmio_lock);
        if (ctx->fpage->count)
                arm_kmmio_fault_page(ctx->fpage);
        spin_unlock(&kmmio_lock);

        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;

        /* These were acquired in kmmio_handler(). */
        ctx->active--;
        BUG_ON(ctx->active);
        rcu_read_unlock();
        preempt_enable_no_resched();

        /*
         * If somebody else was single-stepping across a probe point, TF
         * is still set; in that case let the rest of do_debug() run as
         * if this were not a kmmio trap.
         */
        if (!(regs->flags & X86_EFLAGS_TF))
                ret = 1;
out:
        put_cpu_var(kmmio_ctx);
        return ret;
}

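/*
 * Arm (or take another reference on) the fault page covering @addr.
 * Called with kmmio_lock held (see register_kmmio_probe()), hence the
 * GFP_ATOMIC allocation.
 */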
static int add_kmmio_fault_page(unsigned long addr)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (f) {
                if (!f->count)
                        arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }

        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;

        f->count = 1;
        f->addr = addr;

        if (arm_kmmio_fault_page(f)) {
                kfree(f);
                return -1;
        }

        list_add_rcu(&f->list, kmmio_page_list(f->addr));

        return 0;
}

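/*
 * Drop one reference on the fault page covering @addr; when the last
 * reference goes away, disarm the page and queue it on *release_list
 * for RCU-delayed removal. Called with kmmio_lock held.
 */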
static void release_kmmio_fault_page(unsigned long addr,
                                     struct kmmio_fault_page **release_list)
{
        struct kmmio_fault_page *f;

        f = get_kmmio_fault_page(addr);
        if (!f)
                return;

        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
                disarm_kmmio_fault_page(f);
                if (!f->scheduled_for_release) {
                        f->release_next = *release_list;
                        *release_list = f;
                        f->scheduled_for_release = true;
                }
        }
}

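/*
 * Register a probe and arm every page covering [p->addr, p->addr + p->len).
 * Returns -EEXIST if an existing probe already covers the page-aligned
 * start address and -EINVAL if the address is not mapped. With
 * page-unaligned ranges, the armed pages may extend beyond the requested
 * range; faults on those extra addresses are silently dropped.
 */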
int register_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        int ret = 0;
        unsigned long size = 0;
        unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        unsigned int l;
        pte_t *pte;

        spin_lock_irqsave(&kmmio_lock, flags);
        if (get_kmmio_probe(addr)) {
                ret = -EEXIST;
                goto out;
        }

        pte = lookup_address(addr, &l);
        if (!pte) {
                ret = -EINVAL;
                goto out;
        }

        kmmio_count++;
        list_add_rcu(&p->list, &kmmio_probes);
        while (size < size_lim) {
                if (add_kmmio_fault_page(addr + size))
                        pr_err("Unable to set page fault.\n");
                size += page_level_size(l);
        }
out:
        spin_unlock_irqrestore(&kmmio_lock, flags);
        return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

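/*
 * Final stage of delayed release: by now the pages are gone from
 * kmmio_page_table and an RCU grace period has passed, so the structs
 * can be freed.
 */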
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr = container_of(
                                                head,
                                                struct kmmio_delayed_release,
                                                rcu);
        struct kmmio_fault_page *f = dr->release_list;
        while (f) {
                struct kmmio_fault_page *next = f->release_next;
                BUG_ON(f->count);
                kfree(f);
                f = next;
        }
        kfree(dr);
}

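/*
 * First RCU callback after unregistering: unlink the unused pages from
 * kmmio_page_table, drop any page that was re-registered in the meantime
 * from the release list, then wait another grace period before actually
 * freeing the structs.
 */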
static void remove_kmmio_fault_pages(struct rcu_head *head)
{
        struct kmmio_delayed_release *dr =
                container_of(head, struct kmmio_delayed_release, rcu);
        struct kmmio_fault_page *f = dr->release_list;
        struct kmmio_fault_page **prevp = &dr->release_list;
        unsigned long flags;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (f) {
                if (!f->count) {
                        list_del_rcu(&f->list);
                        prevp = &f->release_next;
                } else {
                        /* The page was re-registered; keep it alive. */
                        *prevp = f->release_next;
                        f->release_next = NULL;
                        f->scheduled_for_release = false;
                }
                f = *prevp;
        }
        spin_unlock_irqrestore(&kmmio_lock, flags);

        /* This is the real RCU destroy call. */
        call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

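/*
 * Remove a kmmio probe. The callbacks may still run until an RCU grace
 * period has passed, so the caller must synchronize_rcu() before freeing
 * the kmmio_probe itself.
 *
 * Tearing down a fault page takes three steps:
 * 1. release_kmmio_fault_page() disarms the page;
 * 2. remove_kmmio_fault_pages() unlinks it from kmmio_page_table after
 *    an RCU grace period;
 * 3. rcu_free_kmmio_fault_pages() frees it after a second grace period.
 */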
void unregister_kmmio_probe(struct kmmio_probe *p)
{
        unsigned long flags;
        unsigned long size = 0;
        unsigned long addr = p->addr & PAGE_MASK;
        const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
        struct kmmio_fault_page *release_list = NULL;
        struct kmmio_delayed_release *drelease;
        unsigned int l;
        pte_t *pte;

        pte = lookup_address(addr, &l);
        if (!pte)
                return;

        spin_lock_irqsave(&kmmio_lock, flags);
        while (size < size_lim) {
                release_kmmio_fault_page(addr + size, &release_list);
                size += page_level_size(l);
        }
        list_del_rcu(&p->list);
        kmmio_count--;
        spin_unlock_irqrestore(&kmmio_lock, flags);

        if (!release_list)
                return;

        drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
        if (!drelease) {
                pr_crit("leaking kmmio_fault_page objects.\n");
                return;
        }
        drelease->release_list = release_list;

        /*
         * The pages are now disarmed, but they cannot be removed from
         * kmmio_page_table yet: a probe hit on another CPU may still be
         * using them. Wait an RCU grace period before unlinking them
         * (remove_kmmio_fault_pages), and a second one before freeing
         * them (rcu_free_kmmio_fault_pages).
         */
        call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);

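/*
 * die notifier: catch the single-step debug trap set up by
 * kmmio_handler() and hand it to post_kmmio_handler().
 */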
static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
        struct die_args *arg = args;
        unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

        if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
                if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
                        /* Clear the BS bit in dr6 to mark the trap handled. */
                        *dr6_p &= ~DR_STEP;
                        return NOTIFY_STOP;
                }

        return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
        .notifier_call = kmmio_die_notifier
};

int kmmio_init(void)
{
        int i;

        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
                INIT_LIST_HEAD(&kmmio_page_table[i]);

        return register_die_notifier(&nb_die);
}

void kmmio_cleanup(void)
{
        int i;

        unregister_die_notifier(&nb_die);
        for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
                WARN_ONCE(!list_empty(&kmmio_page_table[i]),
                          KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
        }
}