Lines matching refs: vma (kernel/events/uprobes.c)

121 static bool valid_vma(struct vm_area_struct *vma, bool is_register)  in valid_vma()  argument
128 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
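
valid_vma() gates every path below: only file-backed mappings whose flags reduce to exactly VM_MAYEXEC under the mask pass, and registration additionally rejects writable mappings. A minimal userspace sketch of that mask test follows; the valid_flags() helper name is hypothetical, the flag values mirror common kernel definitions but should be treated as illustrative, and the exact mask is an assumption inferred from the return statement above.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's vm_flags bits. */
    #define VM_WRITE    0x00000002UL
    #define VM_MAYEXEC  0x00000040UL
    #define VM_MAYSHARE 0x00000080UL
    #define VM_HUGETLB  0x00400000UL

    /* Sketch of the valid_vma() flag test: the mapping must have
     * VM_MAYEXEC set and none of the other masked bits; when
     * registering a probe, writable mappings are rejected too by
     * widening the mask with VM_WRITE. */
    static bool valid_flags(unsigned long vm_flags, bool is_register)
    {
        unsigned long flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

        if (is_register)
            flags |= VM_WRITE;

        return (vm_flags & flags) == VM_MAYEXEC;
    }

    int main(void)
    {
        printf("%d\n", valid_flags(VM_MAYEXEC, true));             /* 1 */
        printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, true));  /* 0 */
        printf("%d\n", valid_flags(VM_MAYEXEC | VM_WRITE, false)); /* 1 */
        return 0;
    }

Note the asymmetry in the last two calls: a writable mapping cannot take a new probe, but it is still scanned when probes are being removed.
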
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument
133 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument
138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
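
offset_to_vaddr() and vaddr_to_offset() are exact inverses for any address inside the mapping: vm_pgoff records, in pages, where in the file the mapping begins, so a file offset and a virtual address convert back and forth by the same shift-and-subtract. A standalone sketch of the arithmetic; PAGE_SHIFT of 12 and the pared-down struct are assumptions for the demo.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* 4 KiB pages assumed for the demo */

    /* Pared-down stand-in carrying only the fields the helpers use. */
    struct vma {
        unsigned long vm_start; /* first virtual address of the mapping */
        unsigned long vm_pgoff; /* file offset of vm_start, in pages */
    };

    static unsigned long offset_to_vaddr(const struct vma *vma, long long offset)
    {
        return vma->vm_start + offset - ((long long)vma->vm_pgoff << PAGE_SHIFT);
    }

    static long long vaddr_to_offset(const struct vma *vma, unsigned long vaddr)
    {
        return ((long long)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
    }

    int main(void)
    {
        /* A mapping at 0x400000 that begins three pages into the file. */
        struct vma v = { .vm_start = 0x400000, .vm_pgoff = 3 };
        long long off = 0x3456; /* probe offset within the file */
        unsigned long vaddr = offset_to_vaddr(&v, off);

        assert(vaddr_to_offset(&v, vaddr) == off); /* round-trips exactly */
        printf("offset %#llx -> vaddr %#lx\n", off, vaddr); /* -> 0x400456 */
        return 0;
    }
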
152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument
155 struct mm_struct *mm = vma->vm_mm; in __replace_page()
164 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg); in __replace_page()
178 page_add_new_anon_rmap(kpage, vma, addr); in __replace_page()
180 lru_cache_add_active_or_unevictable(kpage, vma); in __replace_page()
187 flush_cache_page(vma, addr, pte_pfn(*ptep)); in __replace_page()
188 ptep_clear_flush_notify(vma, addr, ptep); in __replace_page()
189 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in __replace_page()
196 if (vma->vm_flags & VM_LOCKED) in __replace_page()
296 struct vm_area_struct *vma; in uprobe_write_opcode() local
301 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); in uprobe_write_opcode()
309 ret = anon_vma_prepare(vma); in uprobe_write_opcode()
314 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); in uprobe_write_opcode()
322 ret = __replace_page(vma, vaddr, old_page, new_page); in uprobe_write_opcode()
642 struct vm_area_struct *vma, unsigned long vaddr) in install_breakpoint() argument
647 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
714 struct vm_area_struct *vma; in build_map_info() local
722 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in build_map_info()
723 if (!valid_vma(vma, is_register)) in build_map_info()
741 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users)) in build_map_info()
749 info->mm = vma->vm_mm; in build_map_info()
750 info->vaddr = offset_to_vaddr(vma, offset); in build_map_info()
797 struct vm_area_struct *vma; in register_for_each_vma() local
803 vma = find_vma(mm, info->vaddr); in register_for_each_vma()
804 if (!vma || !valid_vma(vma, is_register) || in register_for_each_vma()
805 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
808 if (vma->vm_start > info->vaddr || in register_for_each_vma()
809 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
816 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
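
register_for_each_vma() cannot trust the (mm, vaddr) pairs sampled earlier by build_map_info(): the task may have unmapped or remapped the region in between, and find_vma() returns the first VMA ending above vaddr, which may start beyond it. Hence the recheck above before installing the breakpoint. A hedged standalone rendering of that revalidation predicate; struct vma is a pared-down stand-in whose "inode" field models file_inode(vma->vm_file), still_valid() is a hypothetical name, PAGE_SHIFT is assumed, and the valid_vma() gating is elided.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed for the demo */

    struct vma {
        unsigned long vm_start, vm_end;
        unsigned long vm_pgoff;
        unsigned long inode; /* stand-in for file_inode(vma->vm_file) */
    };

    /* The VMA found at vaddr must still be backed by the probed inode,
     * must actually contain vaddr (find_vma() can return a VMA that
     * starts above it), and must map vaddr to the registered offset. */
    static bool still_valid(const struct vma *vma, unsigned long vaddr,
                            unsigned long inode, long long uprobe_offset)
    {
        if (!vma || vma->inode != inode)
            return false;
        if (vma->vm_start > vaddr)
            return false;
        return ((long long)vma->vm_pgoff << PAGE_SHIFT) +
               (vaddr - vma->vm_start) == uprobe_offset;
    }

    int main(void)
    {
        struct vma v = { 0x400000, 0x401000, 2, 42 };

        printf("%d\n", still_valid(&v, 0x400100, 42, 0x2100)); /* 1 */
        printf("%d\n", still_valid(&v, 0x400100, 42, 0x2200)); /* 0: remapped */
        return 0;
    }
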
962 struct vm_area_struct *vma; in unapply_uprobe() local
966 for (vma = mm->mmap; vma; vma = vma->vm_next) { in unapply_uprobe()
970 if (!valid_vma(vma, false) || in unapply_uprobe()
971 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
974 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; in unapply_uprobe()
976 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
979 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
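
unapply_uprobe() walks every VMA in the mm and removes the breakpoint only where the probed inode is mapped and the probe's file offset actually falls inside the file range the VMA covers. The containment test is plain interval arithmetic; a runnable sketch follows, with vma_covers_offset() as a hypothetical name and PAGE_SHIFT assumed.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed for the demo */

    struct vma {
        unsigned long vm_start, vm_end; /* virtual range [vm_start, vm_end) */
        unsigned long vm_pgoff;         /* file offset of vm_start, in pages */
    };

    /* Rendering of the filter above: the probe's file offset must fall
     * inside the file range this mapping covers, i.e.
     * [offset, offset + mapping size). */
    static bool vma_covers_offset(const struct vma *vma, long long uprobe_offset)
    {
        long long offset = (long long)vma->vm_pgoff << PAGE_SHIFT;

        return uprobe_offset >= offset &&
               uprobe_offset < offset + (long long)(vma->vm_end - vma->vm_start);
    }

    int main(void)
    {
        /* One page mapped at file offset 0x2000. */
        struct vma v = { 0x400000, 0x401000, 2 };

        printf("%d\n", vma_covers_offset(&v, 0x2000)); /* 1: first byte */
        printf("%d\n", vma_covers_offset(&v, 0x2fff)); /* 1: last byte */
        printf("%d\n", vma_covers_offset(&v, 0x3000)); /* 0: one past the end */
        return 0;
    }
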
1016 struct vm_area_struct *vma, in build_probe_list() argument
1025 min = vaddr_to_offset(vma, start); in build_probe_list()
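
build_probe_list() translates the VMA's virtual span back into an inclusive file-offset window [min, max] and collects the registered uprobes whose offsets fall inside it. A sketch of the window computation; only min appears in the listing, so the max formula is an assumption, and PAGE_SHIFT is assumed.

    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed */

    int main(void)
    {
        unsigned long vm_start = 0x400000, vm_pgoff = 3;
        unsigned long start = 0x400000, end = 0x402000; /* two pages */

        /* vaddr_to_offset(vma, start), inlined. */
        long long min = ((long long)vm_pgoff << PAGE_SHIFT) + (start - vm_start);
        /* Inclusive upper bound of the window (assumed formula). */
        long long max = min + (end - start) - 1;

        printf("collect probes with offsets in [%#llx, %#llx]\n", min, max);
        /* -> [0x3000, 0x4fff] */
        return 0;
    }
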
1055 int uprobe_mmap(struct vm_area_struct *vma) in uprobe_mmap() argument
1061 if (no_uprobe_events() || !valid_vma(vma, true)) in uprobe_mmap()
1064 inode = file_inode(vma->vm_file); in uprobe_mmap()
1069 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); in uprobe_mmap()
1077 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1078 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1079 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
1089 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vma_has_uprobes() argument
1095 inode = file_inode(vma->vm_file); in vma_has_uprobes()
1097 min = vaddr_to_offset(vma, start); in vma_has_uprobes()
1110 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) in uprobe_munmap() argument
1112 if (no_uprobe_events() || !valid_vma(vma, false)) in uprobe_munmap()
1115 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap()
1118 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap()
1119 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap()
1122 if (vma_has_uprobes(vma, start, end)) in uprobe_munmap()
1123 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
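
uprobe_munmap() deliberately does no page-table work: if the vanishing range might contain probes, it only sets MMF_RECALC_UPROBES, and a later pass (mmf_recalc_uprobes(), listed below) rescans the remaining VMAs and drops MMF_HAS_UPROBES when nothing is left. A userspace sketch of that lazy-invalidation pattern; the flag values, struct mm, and both helper names are illustrative, and in the kernel the RECALC bit is consumed with test_and_clear_bit() on the breakpoint-handling path.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the MMF_* mm flags used here. */
    #define MMF_HAS_UPROBES    (1u << 0)
    #define MMF_RECALC_UPROBES (1u << 1)

    struct mm { unsigned int flags; };

    /* On munmap, nothing is torn down eagerly: if the mm is marked as
     * having probes and the dying range might contain one, only the
     * "recalculate later" bit is set. */
    static void on_munmap(struct mm *mm, bool range_had_uprobes)
    {
        if (!(mm->flags & MMF_HAS_UPROBES) || (mm->flags & MMF_RECALC_UPROBES))
            return;
        if (range_had_uprobes)
            mm->flags |= MMF_RECALC_UPROBES;
    }

    /* Later the flag is consumed and the surviving VMAs are rescanned;
     * if none still holds a probe, MMF_HAS_UPROBES is dropped as well. */
    static void recalc(struct mm *mm, bool any_vma_has_uprobes)
    {
        mm->flags &= ~MMF_RECALC_UPROBES;
        if (!any_vma_has_uprobes)
            mm->flags &= ~MMF_HAS_UPROBES;
    }

    int main(void)
    {
        struct mm mm = { .flags = MMF_HAS_UPROBES };

        on_munmap(&mm, true);      /* mark stale */
        recalc(&mm, false);        /* rescan finds nothing */
        printf("%#x\n", mm.flags); /* 0: both bits cleared */
        return 0;
    }
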
1129 struct vm_area_struct *vma; in xol_add_vma() local
1148 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, in xol_add_vma()
1151 if (IS_ERR(vma)) { in xol_add_vma()
1152 ret = PTR_ERR(vma); in xol_add_vma()
1670 struct vm_area_struct *vma; in mmf_recalc_uprobes() local
1672 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mmf_recalc_uprobes()
1673 if (!valid_vma(vma, false)) in mmf_recalc_uprobes()
1681 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) in mmf_recalc_uprobes()
1717 struct vm_area_struct *vma; in find_active_uprobe() local
1720 vma = find_vma(mm, bp_vaddr); in find_active_uprobe()
1721 if (vma && vma->vm_start <= bp_vaddr) { in find_active_uprobe()
1722 if (valid_vma(vma, false)) { in find_active_uprobe()
1723 struct inode *inode = file_inode(vma->vm_file); in find_active_uprobe()
1724 loff_t offset = vaddr_to_offset(vma, bp_vaddr); in find_active_uprobe()