Lines matching refs: ctx (fs/aio.c)
205 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) in aio_private_file() argument
215 inode->i_mapping->private_data = ctx; in aio_private_file()
269 static void put_aio_ring_file(struct kioctx *ctx) in put_aio_ring_file() argument
271 struct file *aio_ring_file = ctx->aio_ring_file; in put_aio_ring_file()
278 ctx->aio_ring_file = NULL; in put_aio_ring_file()
285 static void aio_free_ring(struct kioctx *ctx) in aio_free_ring() argument
292 put_aio_ring_file(ctx); in aio_free_ring()
294 for (i = 0; i < ctx->nr_pages; i++) { in aio_free_ring()
297 page_count(ctx->ring_pages[i])); in aio_free_ring()
298 page = ctx->ring_pages[i]; in aio_free_ring()
301 ctx->ring_pages[i] = NULL; in aio_free_ring()
305 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { in aio_free_ring()
306 kfree(ctx->ring_pages); in aio_free_ring()
307 ctx->ring_pages = NULL; in aio_free_ring()
328 struct kioctx *ctx; in aio_ring_remap() local
330 ctx = table->table[i]; in aio_ring_remap()
331 if (ctx && ctx->aio_ring_file == file) { in aio_ring_remap()
332 if (!atomic_read(&ctx->dead)) { in aio_ring_remap()
333 ctx->user_id = ctx->mmap_base = vma->vm_start; in aio_ring_remap()
354 struct kioctx *ctx; in aio_migratepage() local
363 ctx = mapping->private_data; in aio_migratepage()
364 if (!ctx) { in aio_migratepage()
373 if (!mutex_trylock(&ctx->ring_lock)) { in aio_migratepage()
379 if (idx < (pgoff_t)ctx->nr_pages) { in aio_migratepage()
381 if (ctx->ring_pages[idx] != old) in aio_migratepage()
403 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_migratepage()
405 BUG_ON(ctx->ring_pages[idx] != old); in aio_migratepage()
406 ctx->ring_pages[idx] = new; in aio_migratepage()
407 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_migratepage()
413 mutex_unlock(&ctx->ring_lock); in aio_migratepage()
427 static int aio_setup_ring(struct kioctx *ctx) in aio_setup_ring() argument
430 unsigned nr_events = ctx->max_reqs; in aio_setup_ring()
447 file = aio_private_file(ctx, nr_pages); in aio_setup_ring()
449 ctx->aio_ring_file = NULL; in aio_setup_ring()
453 ctx->aio_ring_file = file; in aio_setup_ring()
457 ctx->ring_pages = ctx->internal_pages; in aio_setup_ring()
459 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), in aio_setup_ring()
461 if (!ctx->ring_pages) { in aio_setup_ring()
462 put_aio_ring_file(ctx); in aio_setup_ring()
478 ctx->ring_pages[i] = page; in aio_setup_ring()
480 ctx->nr_pages = i; in aio_setup_ring()
483 aio_free_ring(ctx); in aio_setup_ring()
487 ctx->mmap_size = nr_pages * PAGE_SIZE; in aio_setup_ring()
488 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); in aio_setup_ring()
491 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, in aio_setup_ring()
495 if (IS_ERR((void *)ctx->mmap_base)) { in aio_setup_ring()
496 ctx->mmap_size = 0; in aio_setup_ring()
497 aio_free_ring(ctx); in aio_setup_ring()
501 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); in aio_setup_ring()
503 ctx->user_id = ctx->mmap_base; in aio_setup_ring()
504 ctx->nr_events = nr_events; /* trusted copy */ in aio_setup_ring()
506 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring()
515 flush_dcache_page(ctx->ring_pages[0]); in aio_setup_ring()
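aio_setup_ring() maps the first ring page and fills in the ring header (the kmap_atomic(ctx->ring_pages[0]) at line 506 above). For orientation, that header has roughly the following layout in fs/aio.c of this era; treat it as an illustrative sketch rather than a stable ABI (struct io_event itself comes from include/uapi/linux/aio_abi.h):

struct aio_ring {
	unsigned	id;		/* kernel internal index number */
	unsigned	nr;		/* number of io_events the ring can hold */
	unsigned	head;		/* consumer index (io_getevents side) */
	unsigned	tail;		/* producer index (aio_complete side) */

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of struct aio_ring */

	struct io_event	io_events[];	/* completion events follow the header */
};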
527 struct kioctx *ctx = req->ki_ctx; in kiocb_set_cancel_fn() local
530 spin_lock_irqsave(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
533 list_add(&req->ki_list, &ctx->active_reqs); in kiocb_set_cancel_fn()
537 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
564 struct kioctx *ctx = container_of(work, struct kioctx, free_work); in free_ioctx() local
566 pr_debug("freeing %p\n", ctx); in free_ioctx()
568 aio_free_ring(ctx); in free_ioctx()
569 free_percpu(ctx->cpu); in free_ioctx()
570 percpu_ref_exit(&ctx->reqs); in free_ioctx()
571 percpu_ref_exit(&ctx->users); in free_ioctx()
572 kmem_cache_free(kioctx_cachep, ctx); in free_ioctx()
577 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); in free_ioctx_reqs() local
580 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) in free_ioctx_reqs()
581 complete(&ctx->rq_wait->comp); in free_ioctx_reqs()
583 INIT_WORK(&ctx->free_work, free_ioctx); in free_ioctx_reqs()
584 schedule_work(&ctx->free_work); in free_ioctx_reqs()
594 struct kioctx *ctx = container_of(ref, struct kioctx, users); in free_ioctx_users() local
597 spin_lock_irq(&ctx->ctx_lock); in free_ioctx_users()
599 while (!list_empty(&ctx->active_reqs)) { in free_ioctx_users()
600 req = list_first_entry(&ctx->active_reqs, in free_ioctx_users()
607 spin_unlock_irq(&ctx->ctx_lock); in free_ioctx_users()
609 percpu_ref_kill(&ctx->reqs); in free_ioctx_users()
610 percpu_ref_put(&ctx->reqs); in free_ioctx_users()
613 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) in ioctx_add_table() argument
626 ctx->id = i; in ioctx_add_table()
627 table->table[i] = ctx; in ioctx_add_table()
634 ring = kmap_atomic(ctx->ring_pages[0]); in ioctx_add_table()
635 ring->id = ctx->id; in ioctx_add_table()
684 struct kioctx *ctx; in ioctx_alloc() local
708 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); in ioctx_alloc()
709 if (!ctx) in ioctx_alloc()
712 ctx->max_reqs = nr_events; in ioctx_alloc()
714 spin_lock_init(&ctx->ctx_lock); in ioctx_alloc()
715 spin_lock_init(&ctx->completion_lock); in ioctx_alloc()
716 mutex_init(&ctx->ring_lock); in ioctx_alloc()
719 mutex_lock(&ctx->ring_lock); in ioctx_alloc()
720 init_waitqueue_head(&ctx->wait); in ioctx_alloc()
722 INIT_LIST_HEAD(&ctx->active_reqs); in ioctx_alloc()
724 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) in ioctx_alloc()
727 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) in ioctx_alloc()
730 ctx->cpu = alloc_percpu(struct kioctx_cpu); in ioctx_alloc()
731 if (!ctx->cpu) in ioctx_alloc()
734 err = aio_setup_ring(ctx); in ioctx_alloc()
738 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); in ioctx_alloc()
739 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); in ioctx_alloc()
740 if (ctx->req_batch < 1) in ioctx_alloc()
741 ctx->req_batch = 1; in ioctx_alloc()
751 aio_nr += ctx->max_reqs; in ioctx_alloc()
754 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ in ioctx_alloc()
755 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ in ioctx_alloc()
757 err = ioctx_add_table(ctx, mm); in ioctx_alloc()
762 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
765 ctx, ctx->user_id, mm, ctx->nr_events); in ioctx_alloc()
766 return ctx; in ioctx_alloc()
769 aio_nr_sub(ctx->max_reqs); in ioctx_alloc()
771 atomic_set(&ctx->dead, 1); in ioctx_alloc()
772 if (ctx->mmap_size) in ioctx_alloc()
773 vm_munmap(ctx->mmap_base, ctx->mmap_size); in ioctx_alloc()
774 aio_free_ring(ctx); in ioctx_alloc()
776 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
777 free_percpu(ctx->cpu); in ioctx_alloc()
778 percpu_ref_exit(&ctx->reqs); in ioctx_alloc()
779 percpu_ref_exit(&ctx->users); in ioctx_alloc()
780 kmem_cache_free(kioctx_cachep, ctx); in ioctx_alloc()
790 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, in kill_ioctx() argument
796 if (atomic_xchg(&ctx->dead, 1)) { in kill_ioctx()
802 WARN_ON(ctx != table->table[ctx->id]); in kill_ioctx()
803 table->table[ctx->id] = NULL; in kill_ioctx()
807 wake_up_all(&ctx->wait); in kill_ioctx()
816 aio_nr_sub(ctx->max_reqs); in kill_ioctx()
818 if (ctx->mmap_size) in kill_ioctx()
819 vm_munmap(ctx->mmap_base, ctx->mmap_size); in kill_ioctx()
821 ctx->rq_wait = wait; in kill_ioctx()
822 percpu_ref_kill(&ctx->users); in kill_ioctx()
848 struct kioctx *ctx = table->table[i]; in exit_aio() local
850 if (!ctx) { in exit_aio()
862 ctx->mmap_size = 0; in exit_aio()
863 kill_ioctx(mm, ctx, &wait); in exit_aio()
875 static void put_reqs_available(struct kioctx *ctx, unsigned nr) in put_reqs_available() argument
881 kcpu = this_cpu_ptr(ctx->cpu); in put_reqs_available()
884 while (kcpu->reqs_available >= ctx->req_batch * 2) { in put_reqs_available()
885 kcpu->reqs_available -= ctx->req_batch; in put_reqs_available()
886 atomic_add(ctx->req_batch, &ctx->reqs_available); in put_reqs_available()
892 static bool get_reqs_available(struct kioctx *ctx) in get_reqs_available() argument
899 kcpu = this_cpu_ptr(ctx->cpu); in get_reqs_available()
901 int old, avail = atomic_read(&ctx->reqs_available); in get_reqs_available()
904 if (avail < ctx->req_batch) in get_reqs_available()
908 avail = atomic_cmpxchg(&ctx->reqs_available, in get_reqs_available()
909 avail, avail - ctx->req_batch); in get_reqs_available()
912 kcpu->reqs_available += ctx->req_batch; in get_reqs_available()
929 static void refill_reqs_available(struct kioctx *ctx, unsigned head, in refill_reqs_available() argument
935 head %= ctx->nr_events; in refill_reqs_available()
939 events_in_ring = ctx->nr_events - (head - tail); in refill_reqs_available()
941 completed = ctx->completed_events; in refill_reqs_available()
950 ctx->completed_events -= completed; in refill_reqs_available()
951 put_reqs_available(ctx, completed); in refill_reqs_available()
958 static void user_refill_reqs_available(struct kioctx *ctx) in user_refill_reqs_available() argument
960 spin_lock_irq(&ctx->completion_lock); in user_refill_reqs_available()
961 if (ctx->completed_events) { in user_refill_reqs_available()
974 ring = kmap_atomic(ctx->ring_pages[0]); in user_refill_reqs_available()
978 refill_reqs_available(ctx, head, ctx->tail); in user_refill_reqs_available()
981 spin_unlock_irq(&ctx->completion_lock); in user_refill_reqs_available()
988 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) in aio_get_req() argument
992 if (!get_reqs_available(ctx)) { in aio_get_req()
993 user_refill_reqs_available(ctx); in aio_get_req()
994 if (!get_reqs_available(ctx)) in aio_get_req()
1002 percpu_ref_get(&ctx->reqs); in aio_get_req()
1004 req->ki_ctx = ctx; in aio_get_req()
1007 put_reqs_available(ctx, 1); in aio_get_req()
1024 struct kioctx *ctx, *ret = NULL; in lookup_ioctx() local
1037 ctx = table->table[id]; in lookup_ioctx()
1038 if (ctx && ctx->user_id == ctx_id) { in lookup_ioctx()
1039 percpu_ref_get(&ctx->users); in lookup_ioctx()
1040 ret = ctx; in lookup_ioctx()
1053 struct kioctx *ctx = iocb->ki_ctx; in aio_complete() local
1071 spin_lock_irqsave(&ctx->ctx_lock, flags); in aio_complete()
1073 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_complete()
1081 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_complete()
1083 tail = ctx->tail; in aio_complete()
1086 if (++tail >= ctx->nr_events) in aio_complete()
1089 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1098 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1101 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, in aio_complete()
1109 ctx->tail = tail; in aio_complete()
1111 ring = kmap_atomic(ctx->ring_pages[0]); in aio_complete()
1115 flush_dcache_page(ctx->ring_pages[0]); in aio_complete()
1117 ctx->completed_events++; in aio_complete()
1118 if (ctx->completed_events > 1) in aio_complete()
1119 refill_reqs_available(ctx, head, tail); in aio_complete()
1120 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_complete()
1143 if (waitqueue_active(&ctx->wait)) in aio_complete()
1144 wake_up(&ctx->wait); in aio_complete()
1146 percpu_ref_put(&ctx->reqs); in aio_complete()
1153 static long aio_read_events_ring(struct kioctx *ctx, in aio_read_events_ring() argument
1168 mutex_lock(&ctx->ring_lock); in aio_read_events_ring()
1171 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1182 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); in aio_read_events_ring()
1187 head %= ctx->nr_events; in aio_read_events_ring()
1188 tail %= ctx->nr_events; in aio_read_events_ring()
1195 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1204 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; in aio_read_events_ring()
1219 head %= ctx->nr_events; in aio_read_events_ring()
1222 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1225 flush_dcache_page(ctx->ring_pages[0]); in aio_read_events_ring()
1229 mutex_unlock(&ctx->ring_lock); in aio_read_events_ring()
1234 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, in aio_read_events() argument
1237 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); in aio_read_events()
1242 if (unlikely(atomic_read(&ctx->dead))) in aio_read_events()
1251 static long read_events(struct kioctx *ctx, long min_nr, long nr, in read_events() argument
1282 aio_read_events(ctx, min_nr, nr, event, &ret); in read_events()
1284 wait_event_interruptible_hrtimeout(ctx->wait, in read_events()
1285 aio_read_events(ctx, min_nr, nr, event, &ret), in read_events()
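read_events() is the backend of the io_getevents(2) syscall. As a hedged userspace illustration (there is no glibc wrapper, so the raw syscall is used; reap() is a hypothetical helper and ctx is an aio_context_t obtained from io_setup elsewhere):

/* Minimal sketch: reap up to 16 completions, waiting at most one second. */
#include <linux/aio_abi.h>	/* aio_context_t, struct io_event */
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

static long reap(aio_context_t ctx)
{
	struct io_event events[16];
	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };

	/* min_nr = 1: block until at least one completion or the timeout expires. */
	return syscall(SYS_io_getevents, ctx, 1L, 16L, events, &timeout);
}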
1310 unsigned long ctx; in SYSCALL_DEFINE2() local
1313 ret = get_user(ctx, ctxp); in SYSCALL_DEFINE2()
1318 if (unlikely(ctx || nr_events == 0)) { in SYSCALL_DEFINE2()
1320 ctx, nr_events); in SYSCALL_DEFINE2()
1343 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) in SYSCALL_DEFINE1() argument
1345 struct kioctx *ioctx = lookup_ioctx(ctx); in SYSCALL_DEFINE1()
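SYSCALL_DEFINE2(io_setup, ...) and SYSCALL_DEFINE1(io_destroy, ...) are the userspace entry points that create and tear down a kioctx. A minimal sketch using the raw syscalls (no glibc wrapper exists; error handling trimmed to the essentials):

#include <linux/aio_abi.h>	/* aio_context_t */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;	/* must start as zero: see the 'ctx || nr_events == 0' check at line 1318 */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {	/* room for up to 128 in-flight requests */
		perror("io_setup");
		return 1;
	}

	/* ... io_submit / io_getevents here ... */

	syscall(SYS_io_destroy, ctx);	/* reaches kill_ioctx() via the lookup_ioctx() at line 1345 */
	return 0;
}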
1487 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, in io_submit_one() argument
1509 req = aio_get_req(ctx); in io_submit_one()
1557 put_reqs_available(ctx, 1); in io_submit_one()
1558 percpu_ref_put(&ctx->reqs); in io_submit_one()
1566 struct kioctx *ctx; in do_io_submit() local
1580 ctx = lookup_ioctx(ctx_id); in do_io_submit()
1581 if (unlikely(!ctx)) { in do_io_submit()
1606 ret = io_submit_one(ctx, user_iocb, &tmp, compat); in do_io_submit()
1612 percpu_ref_put(&ctx->users); in do_io_submit()
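do_io_submit() copies an array of struct iocb pointers from userspace and feeds each one to io_submit_one(). A hedged sketch of building and submitting a single IOCB_CMD_PREAD (submit_pread() is a hypothetical helper; ctx, fd and buf are assumed to be set up elsewhere):

#include <linux/aio_abi.h>	/* struct iocb, IOCB_CMD_PREAD */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static long submit_pread(aio_context_t ctx, int fd, void *buf, size_t len, long long off)
{
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };

	memset(&cb, 0, sizeof(cb));		/* unused fields must be zero */
	cb.aio_lio_opcode = IOCB_CMD_PREAD;	/* read len bytes at offset off into buf */
	cb.aio_fildes     = fd;
	cb.aio_buf        = (__u64)(unsigned long)buf;
	cb.aio_nbytes     = len;
	cb.aio_offset     = off;

	/* Returns how many iocbs were queued (1 on success here). */
	return syscall(SYS_io_submit, ctx, 1L, cbs);
}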
1638 lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) in lookup_kiocb() argument
1642 assert_spin_locked(&ctx->ctx_lock); in lookup_kiocb()
1648 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { in lookup_kiocb()
1668 struct kioctx *ctx; in SYSCALL_DEFINE3() local
1677 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
1678 if (unlikely(!ctx)) in SYSCALL_DEFINE3()
1681 spin_lock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
1683 kiocb = lookup_kiocb(ctx, iocb, key); in SYSCALL_DEFINE3()
1689 spin_unlock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
1700 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
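SYSCALL_DEFINE3(io_cancel, ...) finds the in-flight request with lookup_kiocb(), which walks ctx->active_reqs (line 1648 above); requests only appear on that list once kiocb_set_cancel_fn() has been called on them (line 533 above), so cancelling ordinary file I/O typically fails with -EINVAL. A sketch of the matching userspace call (cancel_one() is a hypothetical helper; cb must be the same struct iocb that was passed to io_submit):

#include <linux/aio_abi.h>	/* aio_context_t, struct iocb, struct io_event */
#include <sys/syscall.h>
#include <unistd.h>

static long cancel_one(aio_context_t ctx, struct iocb *cb, struct io_event *res)
{
	/* Asks the kernel to cancel cb; whether this succeeds, and how the
	 * result is reported, depends on whether a ki_cancel callback was installed. */
	return syscall(SYS_io_cancel, ctx, cb, res);
}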