Lines matching refs:vma in kernel/events/uprobes.c (cross-reference output; source line numbers and enclosing functions preserved)

129 static bool valid_vma(struct vm_area_struct *vma, bool is_register)  in valid_vma()  argument
136 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
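Only the return statement of valid_vma() matched the search. In kernels of this vintage the elided body builds the flag mask roughly as follows; this is a reconstruction from the visible return statement, not a verbatim quote:

    static bool valid_vma(struct vm_area_struct *vma, bool is_register)
    {
            /* Of the tested bits, only VM_MAYEXEC may be set: no hugetlb
             * pages, no shared mappings, executable capability required. */
            vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

            /* Registration additionally rejects currently-writable vmas. */
            if (is_register)
                    flags |= VM_WRITE;

            /* Must be file-backed, and reduce to exactly VM_MAYEXEC. */
            return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
    }
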
139 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument
141 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
144 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument
146 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
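offset_to_vaddr() and vaddr_to_offset() are exact inverses: a vma maps the file starting at page offset vm_pgoff, so a file offset and a user address differ by vm_start - (vm_pgoff << PAGE_SHIFT). A minimal user-space check of the arithmetic, with PAGE_SHIFT = 12 (4 KiB pages) assumed for illustration:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                       /* 4 KiB pages, assumed */

    int main(void)
    {
            unsigned long vm_start = 0x400000;  /* where the vma begins */
            unsigned long vm_pgoff = 0x10;      /* file page the vma starts at */
            long long offset = 0x102a4;         /* probe offset in the file */

            /* offset_to_vaddr() */
            unsigned long vaddr = vm_start + offset
                                  - ((long long)vm_pgoff << PAGE_SHIFT);

            /* vaddr_to_offset() recovers the original file offset */
            long long back = ((long long)vm_pgoff << PAGE_SHIFT)
                             + (vaddr - vm_start);

            assert(back == offset);
            printf("file offset 0x%llx <-> vaddr 0x%lx\n", back, vaddr);
            return 0;
    }
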
160 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument
163 struct mm_struct *mm = vma->vm_mm; in __replace_page()
172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
186 page_add_new_anon_rmap(kpage, vma, addr); in __replace_page()
188 lru_cache_add_active_or_unevictable(kpage, vma); in __replace_page()
195 flush_cache_page(vma, addr, pte_pfn(*ptep)); in __replace_page()
196 ptep_clear_flush_notify(vma, addr, ptep); in __replace_page()
197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in __replace_page()
204 if (vma->vm_flags & VM_LOCKED) in __replace_page()
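Taken in order, the __replace_page() fragments above trace the usual sequence for swapping one page for another under the page-table lock; a comment-level outline of just the steps visible in the listing:

    /*
     * __replace_page(vma, addr, ..., kpage):
     *   1. charge kpage against the mm's memory cgroup
     *      (mem_cgroup_try_charge, line 172);
     *   2. hook kpage into the anon rmap and the LRU
     *      (page_add_new_anon_rmap + lru_cache_add_active_or_unevictable);
     *   3. retire the old translation: flush_cache_page(), then
     *      ptep_clear_flush_notify() -- the _notify suffix also fires
     *      MMU-notifier callbacks for secondary MMUs;
     *   4. install the new pte via set_pte_at_notify() with
     *      mk_pte(kpage, vma->vm_page_prot);
     *   5. for VM_LOCKED vmas (line 204), fix up mlock state of the
     *      page being replaced.
     */
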
304 struct vm_area_struct *vma; in uprobe_write_opcode() local
309 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); in uprobe_write_opcode()
317 ret = anon_vma_prepare(vma); in uprobe_write_opcode()
322 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); in uprobe_write_opcode()
330 ret = __replace_page(vma, vaddr, old_page, new_page); in uprobe_write_opcode()
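The uprobe_write_opcode() fragments show the write path end to end; note that this tree still has the eight-argument get_user_pages() that takes the task and mm explicitly. In outline:

    /*
     * uprobe_write_opcode(), per the lines above:
     *   get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma)
     *       pin the page currently mapped at vaddr, get its vma back;
     *   anon_vma_prepare(vma)
     *       ensure the vma can host an anonymous replacement page;
     *   new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr)
     *       allocate the copy that will carry the breakpoint opcode;
     *   __replace_page(vma, vaddr, old_page, new_page)
     *       atomically retarget the pte from old_page to new_page.
     */
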
648 struct vm_area_struct *vma, unsigned long vaddr) in install_breakpoint() argument
653 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
720 struct vm_area_struct *vma; in build_map_info() local
728 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in build_map_info()
729 if (!valid_vma(vma, is_register)) in build_map_info()
747 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) in build_map_info()
755 info->mm = vma->vm_mm; in build_map_info()
756 info->vaddr = offset_to_vaddr(vma, offset); in build_map_info()
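build_map_info() has to find every mm that maps the probed file, so it walks the address_space's interval tree and pins each mm before the i_mmap lock is dropped. Condensed from the fragments above, with allocation and retry paths elided:

    vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
            if (!valid_vma(vma, is_register))
                    continue;
            /*
             * Pin the mm only if it is still live: atomic_inc_not_zero()
             * fails once mm_users has already dropped to zero, so an mm
             * being torn down is simply skipped.
             */
            if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
                    continue;
            info->mm    = vma->vm_mm;
            info->vaddr = offset_to_vaddr(vma, offset);
    }
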
803 struct vm_area_struct *vma; in register_for_each_vma() local
809 vma = find_vma(mm, info->vaddr); in register_for_each_vma()
810 if (!vma || !valid_vma(vma, is_register) || in register_for_each_vma()
811 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
814 if (vma->vm_start > info->vaddr || in register_for_each_vma()
815 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
822 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
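Between build_map_info() sampling a vma and register_for_each_vma() taking the mm's mmap_sem, the mapping can disappear or change, so everything is revalidated. A sketch of the checks; the `skip` label is a hypothetical stand-in for the real code's bail-out path:

    vma = find_vma(mm, info->vaddr);
    if (!vma || !valid_vma(vma, is_register) ||
        file_inode(vma->vm_file) != uprobe->inode)
            goto skip;      /* hypothetical label for the bail-out path */

    /*
     * find_vma() returns the first vma with vm_end > vaddr, which may sit
     * wholly above vaddr; also recheck that the address still corresponds
     * to the registered file offset.
     */
    if (vma->vm_start > info->vaddr ||
        vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
            goto skip;

    err = install_breakpoint(uprobe, mm, vma, info->vaddr);
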
968 struct vm_area_struct *vma; in unapply_uprobe() local
972 for (vma = mm->mmap; vma; vma = vma->vm_next) { in unapply_uprobe()
976 if (!valid_vma(vma, false) || in unapply_uprobe()
977 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
980 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; in unapply_uprobe()
982 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
985 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
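unapply_uprobe() walks the mm's linear vma list (mm->mmap, as kernels of this era kept it) and selects vmas whose mapped file range covers the probe. The lower-bound half of the range test (source line 981) did not match the search but is implied by line 982:

    offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;  /* file offset of vm_start */
    if (uprobe->offset < offset ||                 /* probe before the vma... */
        uprobe->offset >= offset + vma->vm_end - vma->vm_start)  /* ...or past it */
            continue;

    vaddr = offset_to_vaddr(vma, uprobe->offset);  /* where to remove the bp */
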
1022 struct vm_area_struct *vma, in build_probe_list() argument
1031 min = vaddr_to_offset(vma, start); in build_probe_list()
1061 int uprobe_mmap(struct vm_area_struct *vma) in uprobe_mmap() argument
1067 if (no_uprobe_events() || !valid_vma(vma, true)) in uprobe_mmap()
1070 inode = file_inode(vma->vm_file); in uprobe_mmap()
1075 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); in uprobe_mmap()
1083 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1084 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1085 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
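uprobe_mmap() is the hook run when a new executable mapping is set up; it installs any already-registered probes that land in the new vma. Condensed from the fragments, with locking and error handling elided:

    if (no_uprobe_events() || !valid_vma(vma, true))
            return 0;               /* nothing registered, or vma unfit */

    inode = file_inode(vma->vm_file);
    build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

    /* For each collected uprobe whose consumers accept this mm: */
    if (filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
            unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
            install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
    }
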
1095 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vma_has_uprobes() argument
1101 inode = file_inode(vma->vm_file); in vma_has_uprobes()
1103 min = vaddr_to_offset(vma, start); in vma_has_uprobes()
1116 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) in uprobe_munmap() argument
1118 if (no_uprobe_events() || !valid_vma(vma, false)) in uprobe_munmap()
1121 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap()
1124 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap()
1125 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap()
1128 if (vma_has_uprobes(vma, start, end)) in uprobe_munmap()
1129 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
1651 struct vm_area_struct *vma; in mmf_recalc_uprobes() local
1653 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mmf_recalc_uprobes()
1654 if (!valid_vma(vma, false)) in mmf_recalc_uprobes()
1662 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) in mmf_recalc_uprobes()
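The mmf_recalc_uprobes() fragments are the deferred half of uprobe_munmap() above: munmap never removes breakpoints itself, it only sets MMF_RECALC_UPROBES, and this rescan later keeps MMF_HAS_UPROBES if any probed vma survives and clears it otherwise. The elided lines plausibly read as follows (a reconstruction around the matched lines):

    static void mmf_recalc_uprobes(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            for (vma = mm->mmap; vma; vma = vma->vm_next) {
                    if (!valid_vma(vma, false))
                            continue;
                    /* One probed vma is enough to keep the flag set. */
                    if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
                            return;
            }

            clear_bit(MMF_HAS_UPROBES, &mm->flags);
    }
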
1698 struct vm_area_struct *vma; in find_active_uprobe() local
1701 vma = find_vma(mm, bp_vaddr); in find_active_uprobe()
1702 if (vma && vma->vm_start <= bp_vaddr) { in find_active_uprobe()
1703 if (valid_vma(vma, false)) { in find_active_uprobe()
1704 struct inode *inode = file_inode(vma->vm_file); in find_active_uprobe()
1705 loff_t offset = vaddr_to_offset(vma, bp_vaddr); in find_active_uprobe()
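find_active_uprobe() runs on a breakpoint trap and maps the faulting address back to an (inode, offset) pair. The explicit vm_start check matters because find_vma() returns the first vma ending above the address, which need not actually contain it:

    vma = find_vma(mm, bp_vaddr);
    if (vma && vma->vm_start <= bp_vaddr) {   /* vma really contains bp_vaddr */
            if (valid_vma(vma, false)) {
                    struct inode *inode = file_inode(vma->vm_file);
                    loff_t offset = vaddr_to_offset(vma, bp_vaddr);
                    /* ...then look up the uprobe registered for
                     * (inode, offset) in the global uprobes tree... */
            }
    }
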