Lines Matching refs:uprobe
65 struct uprobe { struct
361 static struct uprobe *get_uprobe(struct uprobe *uprobe) in get_uprobe() argument
363 atomic_inc(&uprobe->ref); in get_uprobe()
364 return uprobe; in get_uprobe()
367 static void put_uprobe(struct uprobe *uprobe) in put_uprobe() argument
369 if (atomic_dec_and_test(&uprobe->ref)) in put_uprobe()
370 kfree(uprobe); in put_uprobe()
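
The pair above is the whole lifetime story for a struct uprobe object: get_uprobe() bumps the atomic reference count and put_uprobe() frees the object when the last holder lets go. A minimal reconstruction from the fragments listed here (no fields beyond those shown are assumed):

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
        atomic_inc(&uprobe->ref);               /* take one more reference */
        return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
        if (atomic_dec_and_test(&uprobe->ref))  /* true only for the last holder */
                kfree(uprobe);
}
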
373 static int match_uprobe(struct uprobe *l, struct uprobe *r) in match_uprobe()
390 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) in __find_uprobe()
392 struct uprobe u = { .inode = inode, .offset = offset }; in __find_uprobe()
394 struct uprobe *uprobe; in __find_uprobe() local
398 uprobe = rb_entry(n, struct uprobe, rb_node); in __find_uprobe()
399 match = match_uprobe(&u, uprobe); in __find_uprobe()
401 return get_uprobe(uprobe); in __find_uprobe()
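
__find_uprobe() walks the global uprobes_tree rbtree, keyed by the (inode, offset) pair that match_uprobe() compares, and returns the matching node with an extra reference already taken; find_uprobe() below is the same lookup done under the tree lock. A sketch of the lookup, assuming the usual ordering on inode first and offset second (the comparison body and the left/right descent are not visible in the listing):

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
        if (l->inode < r->inode)
                return -1;
        if (l->inode > r->inode)
                return 1;
        if (l->offset < r->offset)
                return -1;
        if (l->offset > r->offset)
                return 1;
        return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe u = { .inode = inode, .offset = offset };
        struct rb_node *n = uprobes_tree.rb_node;
        struct uprobe *uprobe;
        int match;

        while (n) {
                uprobe = rb_entry(n, struct uprobe, rb_node);
                match = match_uprobe(&u, uprobe);
                if (!match)
                        return get_uprobe(uprobe);  /* found: hand back a counted reference */

                n = match < 0 ? n->rb_left : n->rb_right;
        }
        return NULL;
}
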
415 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) in find_uprobe()
417 struct uprobe *uprobe; in find_uprobe() local
420 uprobe = __find_uprobe(inode, offset); in find_uprobe()
423 return uprobe; in find_uprobe()
426 static struct uprobe *__insert_uprobe(struct uprobe *uprobe) in __insert_uprobe() argument
430 struct uprobe *u; in __insert_uprobe()
435 u = rb_entry(parent, struct uprobe, rb_node); in __insert_uprobe()
436 match = match_uprobe(uprobe, u); in __insert_uprobe()
448 rb_link_node(&uprobe->rb_node, parent, p); in __insert_uprobe()
449 rb_insert_color(&uprobe->rb_node, &uprobes_tree); in __insert_uprobe()
451 atomic_set(&uprobe->ref, 2); in __insert_uprobe()
464 static struct uprobe *insert_uprobe(struct uprobe *uprobe) in insert_uprobe() argument
466 struct uprobe *u; in insert_uprobe()
469 u = __insert_uprobe(uprobe); in insert_uprobe()
475 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset) in alloc_uprobe()
477 struct uprobe *uprobe, *cur_uprobe; in alloc_uprobe() local
479 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL); in alloc_uprobe()
480 if (!uprobe) in alloc_uprobe()
483 uprobe->inode = igrab(inode); in alloc_uprobe()
484 uprobe->offset = offset; in alloc_uprobe()
485 init_rwsem(&uprobe->register_rwsem); in alloc_uprobe()
486 init_rwsem(&uprobe->consumer_rwsem); in alloc_uprobe()
489 cur_uprobe = insert_uprobe(uprobe); in alloc_uprobe()
492 kfree(uprobe); in alloc_uprobe()
493 uprobe = cur_uprobe; in alloc_uprobe()
497 return uprobe; in alloc_uprobe()
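
alloc_uprobe() is the "create or reuse" path: allocate a new node, pin the inode with igrab(), initialize the two rwsems, then try to insert it. __insert_uprobe() starts the refcount at 2 because one reference is owned by the tree and one goes back to the caller; if an equivalent (inode, offset) node already exists, insert_uprobe() hands back the existing one and the fresh allocation is discarded. A sketch under those assumptions (the losing-allocation cleanup, including dropping the igrab() reference, is reconstructed rather than shown):

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
        struct uprobe *uprobe, *cur_uprobe;

        uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
        if (!uprobe)
                return NULL;

        uprobe->inode = igrab(inode);   /* keep the inode alive while it is probed */
        uprobe->offset = offset;
        init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);

        /* add to uprobes_tree; returns an existing uprobe on (inode, offset) collision */
        cur_uprobe = insert_uprobe(uprobe);
        if (cur_uprobe) {
                kfree(uprobe);          /* lost the race: reuse the existing node */
                uprobe = cur_uprobe;
                iput(inode);            /* assumption: undo the igrab() taken above */
        }

        return uprobe;
}
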
500 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_add() argument
502 down_write(&uprobe->consumer_rwsem); in consumer_add()
503 uc->next = uprobe->consumers; in consumer_add()
504 uprobe->consumers = uc; in consumer_add()
505 up_write(&uprobe->consumer_rwsem); in consumer_add()
513 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc) in consumer_del() argument
518 down_write(&uprobe->consumer_rwsem); in consumer_del()
519 for (con = &uprobe->consumers; *con; con = &(*con)->next) { in consumer_del()
526 up_write(&uprobe->consumer_rwsem); in consumer_del()
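
Consumers are kept on a simple singly linked list hanging off uprobe->consumers, serialized by consumer_rwsem: consumer_add() pushes at the head, consumer_del() walks the next pointers and unlinks the matching entry. Only the loop header of the delete path appears in the listing; the unlink itself is a sketch:

static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        struct uprobe_consumer **con;
        bool ret = false;

        down_write(&uprobe->consumer_rwsem);
        for (con = &uprobe->consumers; *con; con = &(*con)->next) {
                if (*con == uc) {
                        *con = uc->next;        /* splice the consumer out of the list */
                        ret = true;
                        break;
                }
        }
        up_write(&uprobe->consumer_rwsem);

        return ret;
}
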
553 static int copy_insn(struct uprobe *uprobe, struct file *filp) in copy_insn() argument
555 struct address_space *mapping = uprobe->inode->i_mapping; in copy_insn()
556 loff_t offs = uprobe->offset; in copy_insn()
557 void *insn = &uprobe->arch.insn; in copy_insn()
558 int size = sizeof(uprobe->arch.insn); in copy_insn()
563 if (offs >= i_size_read(uprobe->inode)) in copy_insn()
579 static int prepare_uprobe(struct uprobe *uprobe, struct file *file, in prepare_uprobe() argument
584 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
588 down_write(&uprobe->consumer_rwsem); in prepare_uprobe()
589 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags)) in prepare_uprobe()
592 ret = copy_insn(uprobe, file); in prepare_uprobe()
597 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn)) in prepare_uprobe()
600 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr); in prepare_uprobe()
605 BUG_ON((uprobe->offset & ~PAGE_MASK) + in prepare_uprobe()
609 set_bit(UPROBE_COPY_INSN, &uprobe->flags); in prepare_uprobe()
612 up_write(&uprobe->consumer_rwsem); in prepare_uprobe()
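
prepare_uprobe() is a double-checked initialization: the UPROBE_COPY_INSN bit is tested locklessly, re-tested under consumer_rwsem, and only set once the original instruction has been copied from the backing file and accepted by the architecture code. A trimmed sketch; the exact error codes and barrier placement are assumptions, not shown in the listing:

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
                          struct mm_struct *mm, unsigned long vaddr)
{
        int ret = 0;

        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                return ret;             /* fast path: already copied and analyzed */

        down_write(&uprobe->consumer_rwsem);
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                goto out;               /* someone else finished while we slept */

        ret = copy_insn(uprobe, file);  /* read the original bytes from the file */
        if (ret)
                goto out;

        ret = -ENOTSUPP;                /* assumption: refuse to probe an existing trap insn */
        if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
                goto out;

        ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
        if (ret)
                goto out;

        smp_wmb();                      /* assumption: publish the copy before the flag */
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
        up_write(&uprobe->consumer_rwsem);
        return ret;
}
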
623 static bool filter_chain(struct uprobe *uprobe, in filter_chain() argument
629 down_read(&uprobe->consumer_rwsem); in filter_chain()
630 for (uc = uprobe->consumers; uc; uc = uc->next) { in filter_chain()
635 up_read(&uprobe->consumer_rwsem); in filter_chain()
641 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, in install_breakpoint() argument
647 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
659 ret = set_swbp(&uprobe->arch, mm, vaddr); in install_breakpoint()
669 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) in remove_breakpoint() argument
672 return set_orig_insn(&uprobe->arch, mm, vaddr); in remove_breakpoint()
675 static inline bool uprobe_is_active(struct uprobe *uprobe) in uprobe_is_active() argument
677 return !RB_EMPTY_NODE(&uprobe->rb_node); in uprobe_is_active()
684 static void delete_uprobe(struct uprobe *uprobe) in delete_uprobe() argument
686 if (WARN_ON(!uprobe_is_active(uprobe))) in delete_uprobe()
690 rb_erase(&uprobe->rb_node, &uprobes_tree); in delete_uprobe()
692 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ in delete_uprobe()
693 iput(uprobe->inode); in delete_uprobe()
694 put_uprobe(uprobe); in delete_uprobe()
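
delete_uprobe() is the inverse of insertion: drop the node from uprobes_tree, clear the rb_node so uprobe_is_active() reports false, release the inode pinned at allocation, and drop the tree's reference. The locking around the erase is an assumption (uprobes_treelock does not appear in this listing):

static void delete_uprobe(struct uprobe *uprobe)
{
        if (WARN_ON(!uprobe_is_active(uprobe)))
                return;                 /* never inserted, or already deleted */

        spin_lock(&uprobes_treelock);   /* assumption: the tree lock used by insert/find */
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
        RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
        iput(uprobe->inode);            /* release the igrab() from alloc_uprobe() */
        put_uprobe(uprobe);             /* drop the reference owned by the tree */
}
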
781 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new) in register_for_each_vma() argument
788 info = build_map_info(uprobe->inode->i_mapping, in register_for_each_vma()
789 uprobe->offset, is_register); in register_for_each_vma()
805 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
809 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
816 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
818 if (!filter_chain(uprobe, in register_for_each_vma()
820 err |= remove_breakpoint(uprobe, mm, info->vaddr); in register_for_each_vma()
834 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc) in __uprobe_register() argument
836 consumer_add(uprobe, uc); in __uprobe_register()
837 return register_for_each_vma(uprobe, uc); in __uprobe_register()
840 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc) in __uprobe_unregister() argument
844 if (WARN_ON(!consumer_del(uprobe, uc))) in __uprobe_unregister()
847 err = register_for_each_vma(uprobe, NULL); in __uprobe_unregister()
849 if (!uprobe->consumers && !err) in __uprobe_unregister()
850 delete_uprobe(uprobe); in __uprobe_unregister()
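
__uprobe_register() and __uprobe_unregister() pair the consumer-list update with a walk over every mapping of the probed file: registration adds the consumer and installs breakpoints where the filters allow it; unregistration removes the consumer, re-walks with a NULL consumer to strip breakpoints that no longer have takers, and deletes the uprobe once the consumer list is empty. Reassembled almost verbatim from the fragments above:

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        consumer_add(uprobe, uc);
        return register_for_each_vma(uprobe, uc);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
        int err;

        if (WARN_ON(!consumer_del(uprobe, uc)))
                return;

        err = register_for_each_vma(uprobe, NULL);      /* NULL consumer: removal pass */

        if (!uprobe->consumers && !err)
                delete_uprobe(uprobe);  /* last consumer gone and all breakpoints removed */
}
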
872 struct uprobe *uprobe; in uprobe_register() local
887 uprobe = alloc_uprobe(inode, offset); in uprobe_register()
888 if (!uprobe) in uprobe_register()
894 down_write(&uprobe->register_rwsem); in uprobe_register()
896 if (likely(uprobe_is_active(uprobe))) { in uprobe_register()
897 ret = __uprobe_register(uprobe, uc); in uprobe_register()
899 __uprobe_unregister(uprobe, uc); in uprobe_register()
901 up_write(&uprobe->register_rwsem); in uprobe_register()
902 put_uprobe(uprobe); in uprobe_register()
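
From a caller's point of view (kernel-internal users such as the trace_uprobe code), the machinery above reduces to filling in a struct uprobe_consumer and handing it to uprobe_register() with the inode and file offset of the instruction to probe. A hypothetical usage sketch; my_handler, my_consumer, attach_probe and detach_probe are made-up names for illustration:

static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
        pr_info("uprobe hit, ip=%lx\n", instruction_pointer(regs));
        return 0;       /* returning UPROBE_HANDLER_REMOVE would ask handler_chain() to unapply */
}

static struct uprobe_consumer my_consumer = {
        .handler = my_handler,
};

static int attach_probe(struct inode *inode, loff_t offset)
{
        /* offset is relative to the start of the file, not a virtual address */
        return uprobe_register(inode, offset, &my_consumer);
}

static void detach_probe(struct inode *inode, loff_t offset)
{
        uprobe_unregister(inode, offset, &my_consumer);
}
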
920 struct uprobe *uprobe; in uprobe_apply() local
924 uprobe = find_uprobe(inode, offset); in uprobe_apply()
925 if (WARN_ON(!uprobe)) in uprobe_apply()
928 down_write(&uprobe->register_rwsem); in uprobe_apply()
929 for (con = uprobe->consumers; con && con != uc ; con = con->next) in uprobe_apply()
932 ret = register_for_each_vma(uprobe, add ? uc : NULL); in uprobe_apply()
933 up_write(&uprobe->register_rwsem); in uprobe_apply()
934 put_uprobe(uprobe); in uprobe_apply()
947 struct uprobe *uprobe; in uprobe_unregister() local
949 uprobe = find_uprobe(inode, offset); in uprobe_unregister()
950 if (WARN_ON(!uprobe)) in uprobe_unregister()
953 down_write(&uprobe->register_rwsem); in uprobe_unregister()
954 __uprobe_unregister(uprobe, uc); in uprobe_unregister()
955 up_write(&uprobe->register_rwsem); in uprobe_unregister()
956 put_uprobe(uprobe); in uprobe_unregister()
960 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm) in unapply_uprobe() argument
971 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
975 if (uprobe->offset < offset || in unapply_uprobe()
976 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
979 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
980 err |= remove_breakpoint(uprobe, mm, vaddr); in unapply_uprobe()
993 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1022 struct uprobe *u; in build_probe_list()
1032 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1039 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1058 struct uprobe *uprobe, *u; in uprobe_mmap() local
1075 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { in uprobe_mmap()
1077 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1078 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1079 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1081 put_uprobe(uprobe); in uprobe_mmap()
1290 static unsigned long xol_get_insn_slot(struct uprobe *uprobe) in xol_get_insn_slot() argument
1304 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); in xol_get_insn_slot()
1387 put_uprobe(ri->uprobe); in free_ret_instance()
1448 get_uprobe(n->uprobe); in dup_utask()
1539 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) in prepare_uretprobe() argument
1590 ri->uprobe = get_uprobe(uprobe); in prepare_uretprobe()
1607 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) in pre_ssout() argument
1617 xol_vaddr = xol_get_insn_slot(uprobe); in pre_ssout()
1624 err = arch_uprobe_pre_xol(&uprobe->arch, regs); in pre_ssout()
1630 utask->active_uprobe = uprobe; in pre_ssout()
1713 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) in find_active_uprobe()
1716 struct uprobe *uprobe = NULL; in find_active_uprobe() local
1726 uprobe = find_uprobe(inode, offset); in find_active_uprobe()
1729 if (!uprobe) in find_active_uprobe()
1735 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) in find_active_uprobe()
1739 return uprobe; in find_active_uprobe()
1742 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) in handler_chain() argument
1748 down_read(&uprobe->register_rwsem); in handler_chain()
1749 for (uc = uprobe->consumers; uc; uc = uc->next) { in handler_chain()
1765 prepare_uretprobe(uprobe, regs); /* put bp at return */ in handler_chain()
1767 if (remove && uprobe->consumers) { in handler_chain()
1768 WARN_ON(!uprobe_is_active(uprobe)); in handler_chain()
1769 unapply_uprobe(uprobe, current->mm); in handler_chain()
1771 up_read(&uprobe->register_rwsem); in handler_chain()
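
handler_chain() is what actually runs when a probed instruction traps: under register_rwsem it walks the consumer list, calls each ->handler, remembers whether any consumer also wants a return probe (in which case prepare_uretprobe() plants a breakpoint at the return address), and unapplies the probe from this mm only if every handler asked for removal. A sketch that simplifies the remove accounting slightly:

static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
        struct uprobe_consumer *uc;
        int remove = UPROBE_HANDLER_REMOVE;
        bool need_prep = false;         /* does any consumer want a return probe? */

        down_read(&uprobe->register_rwsem);
        for (uc = uprobe->consumers; uc; uc = uc->next) {
                int rc = 0;

                if (uc->handler)
                        rc = uc->handler(uc, regs);

                if (uc->ret_handler)
                        need_prep = true;

                remove &= rc;           /* removal only if every handler requested it */
        }

        if (need_prep && !remove)
                prepare_uretprobe(uprobe, regs);        /* put bp at return */

        if (remove && uprobe->consumers) {
                WARN_ON(!uprobe_is_active(uprobe));
                unapply_uprobe(uprobe, current->mm);
        }
        up_read(&uprobe->register_rwsem);
}
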
1777 struct uprobe *uprobe = ri->uprobe; in handle_uretprobe_chain() local
1780 down_read(&uprobe->register_rwsem); in handle_uretprobe_chain()
1781 for (uc = uprobe->consumers; uc; uc = uc->next) { in handle_uretprobe_chain()
1785 up_read(&uprobe->register_rwsem); in handle_uretprobe_chain()
1859 struct uprobe *uprobe; in handle_swbp() local
1867 uprobe = find_active_uprobe(bp_vaddr, &is_swbp); in handle_swbp()
1868 if (!uprobe) { in handle_swbp()
1895 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags))) in handle_swbp()
1902 if (arch_uprobe_ignore(&uprobe->arch, regs)) in handle_swbp()
1905 handler_chain(uprobe, regs); in handle_swbp()
1907 if (arch_uprobe_skip_sstep(&uprobe->arch, regs)) in handle_swbp()
1910 if (!pre_ssout(uprobe, regs, bp_vaddr)) in handle_swbp()
1915 put_uprobe(uprobe); in handle_swbp()
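
handle_swbp() is the entry point when a task hits the breakpoint instruction: look up the active uprobe for the trapping address, run the consumers, then either let the architecture emulate the original instruction or set up out-of-line single-stepping via pre_ssout(). A condensed control-flow sketch; the no-uprobe and error paths are trimmed, and the helper supplying bp_vaddr is taken from the surrounding kernel code rather than this listing:

static void handle_swbp(struct pt_regs *regs)
{
        struct uprobe *uprobe;
        unsigned long bp_vaddr;
        int is_swbp;

        bp_vaddr = uprobe_get_swbp_addr(regs);
        uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
        if (!uprobe)
                return;         /* raced with unregister, or not our breakpoint: trimmed here */

        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
                goto out;       /* raced with a fresh register; act as if the insn was not hit yet */

        if (arch_uprobe_ignore(&uprobe->arch, regs))
                goto out;

        handler_chain(uprobe, regs);    /* run every consumer */

        if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
                goto out;               /* arch emulated the insn, no single-step needed */

        if (!pre_ssout(uprobe, regs, bp_vaddr))
                return;                 /* stepping out of line; ref dropped in handle_singlestep() */

out:
        put_uprobe(uprobe);
}
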
1924 struct uprobe *uprobe; in handle_singlestep() local
1927 uprobe = utask->active_uprobe; in handle_singlestep()
1929 err = arch_uprobe_post_xol(&uprobe->arch, regs); in handle_singlestep()
1931 arch_uprobe_abort_xol(&uprobe->arch, regs); in handle_singlestep()
1935 put_uprobe(uprobe); in handle_singlestep()
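
handle_singlestep() finishes the job once the out-of-line copy has executed: let the architecture fix up the registers (or abort if the step was interrupted), then drop the reference taken in handle_swbp() and mark the task as running again. A sketch; the utask state handling and the error tail are assumptions based on the surrounding kernel code:

static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
        struct uprobe *uprobe = utask->active_uprobe;
        int err = 0;

        if (utask->state == UTASK_SSTEP_ACK)
                err = arch_uprobe_post_xol(&uprobe->arch, regs);  /* fix up regs after the xol step */
        else
                arch_uprobe_abort_xol(&uprobe->arch, regs);       /* e.g. the step was interrupted */

        put_uprobe(uprobe);             /* pairs with the reference held since handle_swbp() */
        utask->active_uprobe = NULL;
        utask->state = UTASK_RUNNING;

        if (unlikely(err))
                force_sig(SIGILL, current);     /* could not execute the probed insn */
}
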