Lines matching refs: ctx (fs/userfaultfd.c)
65 struct userfaultfd_ctx *ctx; member
117 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) in userfaultfd_ctx_get() argument
119 if (!atomic_inc_not_zero(&ctx->refcount)) in userfaultfd_ctx_get()
131 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) in userfaultfd_ctx_put() argument
133 if (atomic_dec_and_test(&ctx->refcount)) { in userfaultfd_ctx_put()
134 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); in userfaultfd_ctx_put()
135 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); in userfaultfd_ctx_put()
136 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); in userfaultfd_ctx_put()
137 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); in userfaultfd_ctx_put()
138 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); in userfaultfd_ctx_put()
139 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); in userfaultfd_ctx_put()
140 mmput(ctx->mm); in userfaultfd_ctx_put()
141 kmem_cache_free(userfaultfd_ctx_cachep, ctx); in userfaultfd_ctx_put()
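
The get/put pair above (lines 117-141) is the classic inc-not-zero / dec-and-test refcount idiom: a lookup may take a reference only while the count is still nonzero, and whichever put drops the count to zero tears the context down, releasing the mm reference and returning the slab object. A minimal userspace rendering of the same idiom with C11 atomics (struct obj, obj_get and obj_put are illustrative names, not from the source):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj { atomic_uint refcount; /* ... payload ... */ };

    /* Take a reference only while the object is still live (count > 0),
     * mirroring atomic_inc_not_zero() in userfaultfd_ctx_get(). */
    static bool obj_get(struct obj *o)
    {
        unsigned v = atomic_load(&o->refcount);
        while (v != 0)
            if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
                return true;
        return false;  /* object already on its way to being freed */
    }

    /* Drop a reference; whoever hits zero frees the object,
     * mirroring atomic_dec_and_test() in userfaultfd_ctx_put(). */
    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
            free(o);  /* last reference gone */
    }
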
191 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, in userfaultfd_must_wait() argument
196 struct mm_struct *mm = ctx->mm; in userfaultfd_must_wait()
264 struct userfaultfd_ctx *ctx; in handle_userfault() local
272 ctx = vma->vm_userfaultfd_ctx.ctx; in handle_userfault()
273 if (!ctx) in handle_userfault()
276 BUG_ON(ctx->mm != mm); in handle_userfault()
286 if (unlikely(ACCESS_ONCE(ctx->released))) in handle_userfault()
331 userfaultfd_ctx_get(ctx); in handle_userfault()
336 uwq.ctx = ctx; in handle_userfault()
341 spin_lock(&ctx->fault_pending_wqh.lock); in handle_userfault()
346 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); in handle_userfault()
354 spin_unlock(&ctx->fault_pending_wqh.lock); in handle_userfault()
356 must_wait = userfaultfd_must_wait(ctx, address, flags, reason); in handle_userfault()
359 if (likely(must_wait && !ACCESS_ONCE(ctx->released) && in handle_userfault()
362 wake_up_poll(&ctx->fd_wqh, POLLIN); in handle_userfault()
407 spin_lock(&ctx->fault_pending_wqh.lock); in handle_userfault()
413 spin_unlock(&ctx->fault_pending_wqh.lock); in handle_userfault()
420 userfaultfd_ctx_put(ctx); in handle_userfault()
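
handle_userfault() (lines 264-420) is the faulting side of the protocol: it takes a context reference, queues itself on fault_pending_wqh under the waitqueue lock, re-checks ctx->released and userfaultfd_must_wait(), notifies readers via wake_up_poll(&ctx->fd_wqh, POLLIN), and sleeps until userspace resolves the fault. The userspace counterpart is a monitor loop of roughly the following shape (a sketch; the uffd descriptor, the prepared backing page and page_size are assumptions of the example, not taken from the listing):

    #include <poll.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    /* Serve pagefault events: copying a prepared page into the faulting
     * range is what wakes the thread parked in handle_userfault(). */
    static void fault_loop(int uffd, void *page, long page_size)
    {
        for (;;) {
            struct pollfd pfd = { .fd = uffd, .events = POLLIN };
            struct uffd_msg msg;

            if (poll(&pfd, 1, -1) < 0)
                break;
            if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
                continue;
            if (msg.event != UFFD_EVENT_PAGEFAULT)
                continue;

            struct uffdio_copy copy = {
                .dst = msg.arg.pagefault.address & ~(page_size - 1),
                .src = (unsigned long)page,
                .len = page_size,
                .mode = 0,
            };
            ioctl(uffd, UFFDIO_COPY, &copy);  /* resolve + wake */
        }
    }
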
428 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_release() local
429 struct mm_struct *mm = ctx->mm; in userfaultfd_release()
435 ACCESS_ONCE(ctx->released) = true; in userfaultfd_release()
449 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^ in userfaultfd_release()
451 if (vma->vm_userfaultfd_ctx.ctx != ctx) { in userfaultfd_release()
475 spin_lock(&ctx->fault_pending_wqh.lock); in userfaultfd_release()
476 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); in userfaultfd_release()
477 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range); in userfaultfd_release()
478 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_release()
480 wake_up_poll(&ctx->fd_wqh, POLLHUP); in userfaultfd_release()
481 userfaultfd_ctx_put(ctx); in userfaultfd_release()
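
userfaultfd_release() (lines 428-481) flips ctx->released, detaches the context from every VMA, wakes all sleeping faults with a catch-all range (lines 475-478), and posts POLLHUP on fd_wqh before dropping its reference. Defensively, a monitor should treat POLLHUP (or POLLERR, which userfaultfd_poll() returns while the API handshake is still incomplete) as end-of-service; this is monitor policy rather than kernel-mandated behavior, and inside the poll loop of fault_loop() above it is simply:

    /* after poll() returns, inside fault_loop() above */
    if (pfd.revents & (POLLHUP | POLLERR)) {
        close(uffd);  /* fd released, or handshake never completed */
        break;
    }
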
487 struct userfaultfd_ctx *ctx) in find_userfault() argument
492 VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock)); in find_userfault()
495 if (!waitqueue_active(&ctx->fault_pending_wqh)) in find_userfault()
498 wq = list_last_entry(&ctx->fault_pending_wqh.task_list, in find_userfault()
507 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_poll() local
510 poll_wait(file, &ctx->fd_wqh, wait); in userfaultfd_poll()
512 switch (ctx->state) { in userfaultfd_poll()
534 if (waitqueue_active(&ctx->fault_pending_wqh)) in userfaultfd_poll()
542 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, in userfaultfd_ctx_read() argument
550 spin_lock(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
551 __add_wait_queue(&ctx->fd_wqh, &wait); in userfaultfd_ctx_read()
554 spin_lock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
555 uwq = find_userfault(ctx); in userfaultfd_ctx_read()
564 write_seqcount_begin(&ctx->refile_seq); in userfaultfd_ctx_read()
588 __add_wait_queue(&ctx->fault_wqh, &uwq->wq); in userfaultfd_ctx_read()
590 write_seqcount_end(&ctx->refile_seq); in userfaultfd_ctx_read()
594 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
598 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
607 spin_unlock(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
609 spin_lock(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
611 __remove_wait_queue(&ctx->fd_wqh, &wait); in userfaultfd_ctx_read()
613 spin_unlock(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
621 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_read() local
626 if (ctx->state == UFFD_STATE_WAIT_API) in userfaultfd_read()
632 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg); in userfaultfd_read()
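
userfaultfd_ctx_read() (lines 542-613) dequeues the oldest pending fault and, crucially, refiles the waiter from fault_pending_wqh to fault_wqh inside a refile_seq write section (lines 564-590), so a concurrent wakeup can never miss a waiter that is mid-move between the two queues. userfaultfd_read() refuses to run before the API handshake (line 626) and loops, packing whole uffd_msg records into the user buffer (line 632). For a descriptor opened non-blocking (O_NONBLOCK), draining looks roughly like this (sketch):

    #include <errno.h>
    #include <unistd.h>
    #include <linux/userfaultfd.h>

    /* Drain whatever is pending on a non-blocking uffd: read()
     * returns as many complete uffd_msg records as fit the buffer. */
    static int drain(int uffd, struct uffd_msg *buf, int nmsgs)
    {
        ssize_t n = read(uffd, buf, (size_t)nmsgs * sizeof(*buf));

        if (n < 0)
            return errno == EAGAIN ? 0 : -1;  /* 0: nothing pending */
        return (int)(n / (ssize_t)sizeof(*buf));
    }
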
648 static void __wake_userfault(struct userfaultfd_ctx *ctx, in __wake_userfault() argument
656 spin_lock(&ctx->fault_pending_wqh.lock); in __wake_userfault()
658 if (waitqueue_active(&ctx->fault_pending_wqh)) in __wake_userfault()
659 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, in __wake_userfault()
661 if (waitqueue_active(&ctx->fault_wqh)) in __wake_userfault()
662 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range); in __wake_userfault()
663 spin_unlock(&ctx->fault_pending_wqh.lock); in __wake_userfault()
666 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, in wake_userfault() argument
688 seq = read_seqcount_begin(&ctx->refile_seq); in wake_userfault()
689 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || in wake_userfault()
690 waitqueue_active(&ctx->fault_wqh); in wake_userfault()
692 } while (read_seqcount_retry(&ctx->refile_seq, seq)); in wake_userfault()
694 __wake_userfault(ctx, range); in wake_userfault()
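
wake_userfault() (lines 666-694) is the reader side of that seqcount: it samples refile_seq, checks both waitqueues for activity, and retries if a refile raced in between, so the lockless "nothing to wake" fast path can never miss a waiter in transit. The shape of the pairing, rendered with C11 atomics and ignoring the memory-ordering refinements the kernel's seqcount primitives provide (all names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint refile_seq;  /* even: stable, odd: refile running */

    /* Writer: bump to odd before moving a waiter, back to even after,
     * as write_seqcount_begin()/end() do around the refile. */
    static void refile_begin(void) { atomic_fetch_add(&refile_seq, 1); }
    static void refile_end(void)   { atomic_fetch_add(&refile_seq, 1); }

    /* Reader: retry the "any waiters?" check if a refile overlapped it. */
    static bool any_waiters(bool (*check)(void))
    {
        unsigned seq;
        bool ret;

        do {
            while ((seq = atomic_load(&refile_seq)) & 1)
                ;  /* refile in flight: wait for an even sequence */
            ret = check();
        } while (atomic_load(&refile_seq) != seq);
        return ret;
    }
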
717 static int userfaultfd_register(struct userfaultfd_ctx *ctx, in userfaultfd_register() argument
720 struct mm_struct *mm = ctx->mm; in userfaultfd_register()
786 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ in userfaultfd_register()
801 if (cur->vm_userfaultfd_ctx.ctx && in userfaultfd_register()
802 cur->vm_userfaultfd_ctx.ctx != ctx) in userfaultfd_register()
817 BUG_ON(vma->vm_userfaultfd_ctx.ctx && in userfaultfd_register()
818 vma->vm_userfaultfd_ctx.ctx != ctx); in userfaultfd_register()
824 if (vma->vm_userfaultfd_ctx.ctx == ctx && in userfaultfd_register()
836 ((struct vm_userfaultfd_ctx){ ctx })); in userfaultfd_register()
858 vma->vm_userfaultfd_ctx.ctx = ctx; in userfaultfd_register()
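
userfaultfd_register() (lines 717-858) validates the range, rejects VMAs already owned by a different context (lines 801-802), then merges or splits VMAs so the covered region carries this ctx (line 858) and the requested VM_UFFD_* flags. From userspace the whole walk is one ioctl (sketch; addr and len must be page-aligned):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static int register_range(int uffd, void *addr, unsigned long len)
    {
        struct uffdio_register reg = {
            .range = { .start = (unsigned long)addr, .len = len },
            .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };

        if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
            return -1;
        /* reg.ioctls now advertises the UFFDIO_* ops this range accepts */
        return 0;
    }
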
881 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, in userfaultfd_unregister() argument
884 struct mm_struct *mm = ctx->mm; in userfaultfd_unregister()
929 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ in userfaultfd_unregister()
959 if (!vma->vm_userfaultfd_ctx.ctx) in userfaultfd_unregister()
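
userfaultfd_unregister() (lines 881-959) performs the inverse walk, skipping VMAs that never had a context attached (line 959). The userspace side takes a bare range (sketch):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static int unregister_range(int uffd, void *addr, unsigned long len)
    {
        struct uffdio_range range = {
            .start = (unsigned long)addr, .len = len,
        };

        return ioctl(uffd, UFFDIO_UNREGISTER, &range);
    }
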
1009 static int userfaultfd_wake(struct userfaultfd_ctx *ctx, in userfaultfd_wake() argument
1021 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); in userfaultfd_wake()
1034 wake_userfault(ctx, &range); in userfaultfd_wake()
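
userfaultfd_wake() (lines 1009-1034) validates the user-supplied range and calls wake_userfault(); it is the explicit wakeup used together with the *_MODE_DONTWAKE variants of the fill ioctls. Sketch:

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static int wake_range(int uffd, unsigned long start, unsigned long len)
    {
        struct uffdio_range range = { .start = start, .len = len };

        return ioctl(uffd, UFFDIO_WAKE, &range);
    }
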
1041 static int userfaultfd_copy(struct userfaultfd_ctx *ctx, in userfaultfd_copy() argument
1057 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); in userfaultfd_copy()
1071 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, in userfaultfd_copy()
1082 wake_userfault(ctx, &range); in userfaultfd_copy()
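
userfaultfd_copy() (lines 1041-1082) runs mcopy_atomic() and, unless DONTWAKE was requested, wakes the filled range (line 1082). The byte count actually installed comes back in uffdio_copy.copy, which holds a short count or a negative errno if the fill stopped early (sketch):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static long copy_page_in(int uffd, void *dst, void *src,
                             unsigned long len)
    {
        struct uffdio_copy copy = {
            .dst  = (unsigned long)dst,
            .src  = (unsigned long)src,
            .len  = len,
            .mode = 0,  /* or UFFDIO_COPY_MODE_DONTWAKE */
        };

        if (ioctl(uffd, UFFDIO_COPY, &copy) < 0)
            return copy.copy < 0 ? copy.copy : -1;  /* partial/error */
        return copy.copy;  /* bytes copied; == len on success */
    }
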
1089 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, in userfaultfd_zeropage() argument
1105 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, in userfaultfd_zeropage()
1113 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, in userfaultfd_zeropage()
1124 wake_userfault(ctx, &range); in userfaultfd_zeropage()
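
userfaultfd_zeropage() (lines 1089-1124) has the same shape, backed by mfill_zeropage(), with the count reported in uffdio_zeropage.zeropage (sketch):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static long zero_range(int uffd, unsigned long start, unsigned long len)
    {
        struct uffdio_zeropage zp = {
            .range = { .start = start, .len = len },
            .mode  = 0,
        };

        if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) < 0)
            return -1;
        return zp.zeropage;  /* bytes filled with zeros */
    }
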
1136 static int userfaultfd_api(struct userfaultfd_ctx *ctx, in userfaultfd_api() argument
1144 if (ctx->state != UFFD_STATE_WAIT_API) in userfaultfd_api()
1161 ctx->state = UFFD_STATE_RUNNING; in userfaultfd_api()
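
userfaultfd_api() (lines 1136-1161) gates everything else: until the UFFD_API handshake succeeds the context stays in UFFD_STATE_WAIT_API, and userfaultfd_ioctl() (line 1173) rejects every other command (read() likewise fails at line 626). The handshake from userspace (sketch):

    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    static int do_api_handshake(int uffd)
    {
        struct uffdio_api api = { .api = UFFD_API, .features = 0 };

        if (ioctl(uffd, UFFDIO_API, &api) < 0)
            return -1;  /* kernel rejected the api/features combo */
        /* api.ioctls now advertises the supported UFFDIO_* commands,
         * and the context has moved to UFFD_STATE_RUNNING */
        return 0;
    }
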
1171 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_ioctl() local
1173 if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) in userfaultfd_ioctl()
1178 ret = userfaultfd_api(ctx, arg); in userfaultfd_ioctl()
1181 ret = userfaultfd_register(ctx, arg); in userfaultfd_ioctl()
1184 ret = userfaultfd_unregister(ctx, arg); in userfaultfd_ioctl()
1187 ret = userfaultfd_wake(ctx, arg); in userfaultfd_ioctl()
1190 ret = userfaultfd_copy(ctx, arg); in userfaultfd_ioctl()
1193 ret = userfaultfd_zeropage(ctx, arg); in userfaultfd_ioctl()
1202 struct userfaultfd_ctx *ctx = f->private_data; in userfaultfd_show_fdinfo() local
1207 spin_lock(&ctx->fault_pending_wqh.lock); in userfaultfd_show_fdinfo()
1208 list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) { in userfaultfd_show_fdinfo()
1213 list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) { in userfaultfd_show_fdinfo()
1217 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_show_fdinfo()
1244 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; in init_once_userfaultfd_ctx() local
1246 init_waitqueue_head(&ctx->fault_pending_wqh); in init_once_userfaultfd_ctx()
1247 init_waitqueue_head(&ctx->fault_wqh); in init_once_userfaultfd_ctx()
1248 init_waitqueue_head(&ctx->fd_wqh); in init_once_userfaultfd_ctx()
1249 seqcount_init(&ctx->refile_seq); in init_once_userfaultfd_ctx()
1269 struct userfaultfd_ctx *ctx; in userfaultfd_file_create() local
1282 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); in userfaultfd_file_create()
1283 if (!ctx) in userfaultfd_file_create()
1286 atomic_set(&ctx->refcount, 1); in userfaultfd_file_create()
1287 ctx->flags = flags; in userfaultfd_file_create()
1288 ctx->state = UFFD_STATE_WAIT_API; in userfaultfd_file_create()
1289 ctx->released = false; in userfaultfd_file_create()
1290 ctx->mm = current->mm; in userfaultfd_file_create()
1292 atomic_inc(&ctx->mm->mm_users); in userfaultfd_file_create()
1294 file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, in userfaultfd_file_create()
1297 mmput(ctx->mm); in userfaultfd_file_create()
1298 kmem_cache_free(userfaultfd_ctx_cachep, ctx); in userfaultfd_file_create()
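
userfaultfd_file_create() (lines 1269-1298) backs the userfaultfd(2) syscall: it allocates the context from the slab cache, pins the address space by bumping mm_users (line 1292) so ctx->mm stays valid for the fd's lifetime, and wraps the context in an anon-inode file; on anon_inode_getfile() failure both the mm reference and the slab object are rolled back (lines 1297-1298). There is no libc wrapper for the syscall in this era, so userspace opens the descriptor directly (sketch):

    #include <fcntl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_userfaultfd(void)
    {
        /* O_CLOEXEC / O_NONBLOCK map to UFFD_CLOEXEC / UFFD_NONBLOCK */
        return (int)syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    }
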