locked_vm 1324 drivers/infiniband/sw/siw/siw_verbs.c if (num_pages > mem_limit - current->mm->locked_vm) {
locked_vm 1327 drivers/infiniband/sw/siw/siw_verbs.c current->mm->locked_vm);
locked_vm 422 drivers/vfio/vfio_iommu_type1.c if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) {
locked_vm 449 drivers/vfio/vfio_iommu_type1.c current->mm->locked_vm + lock_acct + 1 > limit) {
locked_vm 3314 fs/io_uring.c atomic_long_sub(nr_pages, &user->locked_vm);
locked_vm 3325 fs/io_uring.c cur_pages = atomic_long_read(&user->locked_vm);
locked_vm 3329 fs/io_uring.c } while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
locked_vm 61 fs/proc/task_mmu.c SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
locked_vm 442 include/linux/mm_types.h unsigned long locked_vm; /* Pages that have PG_mlocked set */
locked_vm 37 include/linux/sched/user.h atomic_long_t locked_vm;
locked_vm 190 kernel/bpf/syscall.c if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
locked_vm 191 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
locked_vm 200 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
locked_vm 1248 kernel/bpf/syscall.c user_bufs = atomic_long_add_return(pages, &user->locked_vm);
locked_vm 1250 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
locked_vm 1261 kernel/bpf/syscall.c atomic_long_sub(pages, &user->locked_vm);
locked_vm 5615 kernel/events/core.c atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
locked_vm 5689 kernel/events/core.c &mmap_user->locked_vm);
locked_vm 5831 kernel/events/core.c user_locked = atomic_long_read(&user->locked_vm);
locked_vm 5843 kernel/events/core.c } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
locked_vm 5898 kernel/events/core.c atomic_long_add(user_extra, &user->locked_vm);
locked_vm 1016 kernel/fork.c mm->locked_vm = 0;
locked_vm 175 mm/debug.c mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
locked_vm 565 mm/mlock.c mm->locked_vm += nr_pages;
locked_vm 692 mm/mlock.c locked += current->mm->locked_vm;
locked_vm 1345 mm/mmap.c locked += mm->locked_vm;
locked_vm 1838 mm/mmap.c mm->locked_vm += (len >> PAGE_SHIFT);
locked_vm 2311 mm/mmap.c locked = mm->locked_vm + grow;
locked_vm 2405 mm/mmap.c mm->locked_vm += grow;
locked_vm 2485 mm/mmap.c mm->locked_vm += grow;
locked_vm 2817 mm/mmap.c if (mm->locked_vm) {
locked_vm 2821 mm/mmap.c mm->locked_vm -= vma_pages(tmp);
locked_vm 3059 mm/mmap.c mm->locked_vm += (len >> PAGE_SHIFT);
locked_vm 3131 mm/mmap.c if (mm->locked_vm) {
locked_vm 426 mm/mremap.c mm->locked_vm += new_len >> PAGE_SHIFT;
locked_vm 477 mm/mremap.c locked = mm->locked_vm << PAGE_SHIFT;
locked_vm 695 mm/mremap.c mm->locked_vm += pages;
locked_vm 427 mm/util.c unsigned long locked_vm, limit;
locked_vm 432 mm/util.c locked_vm = mm->locked_vm;
locked_vm 436 mm/util.c if (locked_vm + pages > limit)
locked_vm 440 mm/util.c mm->locked_vm = locked_vm + pages;
locked_vm 442 mm/util.c WARN_ON_ONCE(pages > locked_vm);
locked_vm 443 mm/util.c mm->locked_vm = locked_vm - pages;
locked_vm 448 mm/util.c locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
locked_vm 1068 net/core/skbuff.c old_pg = atomic_long_read(&user->locked_vm);
locked_vm 1072 net/core/skbuff.c } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
locked_vm 1089 net/core/skbuff.c atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
locked_vm 224 net/xdp/xdp_umem.c atomic_long_sub(umem->npgs, &umem->user->locked_vm);
locked_vm 328 net/xdp/xdp_umem.c old_npgs = atomic_long_read(&umem->user->locked_vm);
locked_vm 335 net/xdp/xdp_umem.c } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
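The listing shows two distinct accounting schemes for locked_vm. The per-user counter (include/linux/sched/user.h, line 37) is an atomic_long_t charged with a read/check/cmpxchg loop, as in fs/io_uring.c, net/core/skbuff.c, and net/xdp/xdp_umem.c. The per-mm counter (include/linux/mm_types.h, line 442) is a plain unsigned long only touched with the mm lock (mmap_sem) held, as in mm/util.c and mm/mmap.c. Below is a minimal userspace C sketch of both patterns; the names charge_user_pages, charge_mm_pages, and PAGE_LIMIT are illustrative stand-ins, not kernel APIs, and a pthread mutex stands in for mmap_sem.

/* Sketch of the two locked_vm accounting patterns; compile with -pthread. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; illustrative only. */
#define PAGE_LIMIT 16384L

/* Pattern 1: lockless per-user accounting, cf. fs/io_uring.c lines
 * 3325-3329 and net/core/skbuff.c lines 1068-1072. The counter is
 * shared by many mms, so it is charged with a cmpxchg loop. */
static atomic_long user_locked_vm = 0;

static bool charge_user_pages(long pages)
{
	long old_pg, new_pg;

	do {
		old_pg = atomic_load(&user_locked_vm);
		new_pg = old_pg + pages;
		if (new_pg > PAGE_LIMIT)
			return false;	/* would exceed the memlock limit */
	} while (!atomic_compare_exchange_weak(&user_locked_vm,
					       &old_pg, new_pg));
	return true;
}

static void uncharge_user_pages(long pages)
{
	atomic_fetch_sub(&user_locked_vm, pages);	/* cf. atomic_long_sub() */
}

/* Pattern 2: per-mm accounting, cf. mm/util.c lines 427-443. The
 * counter is a plain unsigned long, so check and update happen with
 * the mm lock held. */
static pthread_mutex_t fake_mmap_sem = PTHREAD_MUTEX_INITIALIZER;
static unsigned long mm_locked_vm;

static bool charge_mm_pages(unsigned long pages)
{
	bool ok = false;

	pthread_mutex_lock(&fake_mmap_sem);
	if (mm_locked_vm + pages <= PAGE_LIMIT) {
		mm_locked_vm += pages;
		ok = true;
	}
	pthread_mutex_unlock(&fake_mmap_sem);
	return ok;
}

int main(void)
{
	if (charge_user_pages(1024))
		printf("user locked_vm: %ld pages\n",
		       atomic_load(&user_locked_vm));
	uncharge_user_pages(1024);

	if (charge_mm_pages(512))
		printf("mm locked_vm: %lu pages\n", mm_locked_vm);
	return 0;
}

The split explains the references above: mlock/mmap/mremap paths already hold mmap_sem, so mm->locked_vm needs no atomics, while subsystems that pin pages on behalf of a user (bpf, perf, io_uring, XDP) charge the shared user->locked_vm atomically and must roll back with atomic_long_sub on failure.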