Lines Matching refs:ctx
205 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) in aio_private_file() argument
215 inode->i_mapping->private_data = ctx; in aio_private_file()
269 static void put_aio_ring_file(struct kioctx *ctx) in put_aio_ring_file() argument
271 struct file *aio_ring_file = ctx->aio_ring_file; in put_aio_ring_file()
278 ctx->aio_ring_file = NULL; in put_aio_ring_file()
285 static void aio_free_ring(struct kioctx *ctx) in aio_free_ring() argument
292 put_aio_ring_file(ctx); in aio_free_ring()
294 for (i = 0; i < ctx->nr_pages; i++) { in aio_free_ring()
297 page_count(ctx->ring_pages[i])); in aio_free_ring()
298 page = ctx->ring_pages[i]; in aio_free_ring()
301 ctx->ring_pages[i] = NULL; in aio_free_ring()
305 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { in aio_free_ring()
306 kfree(ctx->ring_pages); in aio_free_ring()
307 ctx->ring_pages = NULL; in aio_free_ring()
322 struct kioctx *ctx; in aio_ring_mremap() local
324 ctx = table->table[i]; in aio_ring_mremap()
325 if (ctx && ctx->aio_ring_file == file) { in aio_ring_mremap()
326 if (!atomic_read(&ctx->dead)) { in aio_ring_mremap()
327 ctx->user_id = ctx->mmap_base = vma->vm_start; in aio_ring_mremap()
363 struct kioctx *ctx; in aio_migratepage() local
372 ctx = mapping->private_data; in aio_migratepage()
373 if (!ctx) { in aio_migratepage()
382 if (!mutex_trylock(&ctx->ring_lock)) { in aio_migratepage()
388 if (idx < (pgoff_t)ctx->nr_pages) { in aio_migratepage()
390 if (ctx->ring_pages[idx] != old) in aio_migratepage()
412 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_migratepage()
414 BUG_ON(ctx->ring_pages[idx] != old); in aio_migratepage()
415 ctx->ring_pages[idx] = new; in aio_migratepage()
416 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_migratepage()
422 mutex_unlock(&ctx->ring_lock); in aio_migratepage()
436 static int aio_setup_ring(struct kioctx *ctx) in aio_setup_ring() argument
439 unsigned nr_events = ctx->max_reqs; in aio_setup_ring()
456 file = aio_private_file(ctx, nr_pages); in aio_setup_ring()
458 ctx->aio_ring_file = NULL; in aio_setup_ring()
462 ctx->aio_ring_file = file; in aio_setup_ring()
466 ctx->ring_pages = ctx->internal_pages; in aio_setup_ring()
468 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), in aio_setup_ring()
470 if (!ctx->ring_pages) { in aio_setup_ring()
471 put_aio_ring_file(ctx); in aio_setup_ring()
487 ctx->ring_pages[i] = page; in aio_setup_ring()
489 ctx->nr_pages = i; in aio_setup_ring()
492 aio_free_ring(ctx); in aio_setup_ring()
496 ctx->mmap_size = nr_pages * PAGE_SIZE; in aio_setup_ring()
497 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); in aio_setup_ring()
500 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, in aio_setup_ring()
504 if (IS_ERR((void *)ctx->mmap_base)) { in aio_setup_ring()
505 ctx->mmap_size = 0; in aio_setup_ring()
506 aio_free_ring(ctx); in aio_setup_ring()
510 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); in aio_setup_ring()
512 ctx->user_id = ctx->mmap_base; in aio_setup_ring()
513 ctx->nr_events = nr_events; /* trusted copy */ in aio_setup_ring()
515 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring()
524 flush_dcache_page(ctx->ring_pages[0]); in aio_setup_ring()
536 struct kioctx *ctx = req->ki_ctx; in kiocb_set_cancel_fn() local
539 spin_lock_irqsave(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
542 list_add(&req->ki_list, &ctx->active_reqs); in kiocb_set_cancel_fn()
546 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
573 struct kioctx *ctx = container_of(work, struct kioctx, free_work); in free_ioctx() local
575 pr_debug("freeing %p\n", ctx); in free_ioctx()
577 aio_free_ring(ctx); in free_ioctx()
578 free_percpu(ctx->cpu); in free_ioctx()
579 percpu_ref_exit(&ctx->reqs); in free_ioctx()
580 percpu_ref_exit(&ctx->users); in free_ioctx()
581 kmem_cache_free(kioctx_cachep, ctx); in free_ioctx()
586 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); in free_ioctx_reqs() local
589 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) in free_ioctx_reqs()
590 complete(&ctx->rq_wait->comp); in free_ioctx_reqs()
592 INIT_WORK(&ctx->free_work, free_ioctx); in free_ioctx_reqs()
593 schedule_work(&ctx->free_work); in free_ioctx_reqs()
603 struct kioctx *ctx = container_of(ref, struct kioctx, users); in free_ioctx_users() local
606 spin_lock_irq(&ctx->ctx_lock); in free_ioctx_users()
608 while (!list_empty(&ctx->active_reqs)) { in free_ioctx_users()
609 req = list_first_entry(&ctx->active_reqs, in free_ioctx_users()
616 spin_unlock_irq(&ctx->ctx_lock); in free_ioctx_users()
618 percpu_ref_kill(&ctx->reqs); in free_ioctx_users()
619 percpu_ref_put(&ctx->reqs); in free_ioctx_users()
622 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) in ioctx_add_table() argument
635 ctx->id = i; in ioctx_add_table()
636 table->table[i] = ctx; in ioctx_add_table()
643 ring = kmap_atomic(ctx->ring_pages[0]); in ioctx_add_table()
644 ring->id = ctx->id; in ioctx_add_table()
693 struct kioctx *ctx; in ioctx_alloc() local
717 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); in ioctx_alloc()
718 if (!ctx) in ioctx_alloc()
721 ctx->max_reqs = nr_events; in ioctx_alloc()
723 spin_lock_init(&ctx->ctx_lock); in ioctx_alloc()
724 spin_lock_init(&ctx->completion_lock); in ioctx_alloc()
725 mutex_init(&ctx->ring_lock); in ioctx_alloc()
728 mutex_lock(&ctx->ring_lock); in ioctx_alloc()
729 init_waitqueue_head(&ctx->wait); in ioctx_alloc()
731 INIT_LIST_HEAD(&ctx->active_reqs); in ioctx_alloc()
733 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) in ioctx_alloc()
736 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) in ioctx_alloc()
739 ctx->cpu = alloc_percpu(struct kioctx_cpu); in ioctx_alloc()
740 if (!ctx->cpu) in ioctx_alloc()
743 err = aio_setup_ring(ctx); in ioctx_alloc()
747 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); in ioctx_alloc()
748 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); in ioctx_alloc()
749 if (ctx->req_batch < 1) in ioctx_alloc()
750 ctx->req_batch = 1; in ioctx_alloc()
760 aio_nr += ctx->max_reqs; in ioctx_alloc()
763 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ in ioctx_alloc()
764 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ in ioctx_alloc()
766 err = ioctx_add_table(ctx, mm); in ioctx_alloc()
771 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
774 ctx, ctx->user_id, mm, ctx->nr_events); in ioctx_alloc()
775 return ctx; in ioctx_alloc()
778 aio_nr_sub(ctx->max_reqs); in ioctx_alloc()
780 atomic_set(&ctx->dead, 1); in ioctx_alloc()
781 if (ctx->mmap_size) in ioctx_alloc()
782 vm_munmap(ctx->mmap_base, ctx->mmap_size); in ioctx_alloc()
783 aio_free_ring(ctx); in ioctx_alloc()
785 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
786 free_percpu(ctx->cpu); in ioctx_alloc()
787 percpu_ref_exit(&ctx->reqs); in ioctx_alloc()
788 percpu_ref_exit(&ctx->users); in ioctx_alloc()
789 kmem_cache_free(kioctx_cachep, ctx); in ioctx_alloc()
799 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, in kill_ioctx() argument
805 if (atomic_xchg(&ctx->dead, 1)) { in kill_ioctx()
811 WARN_ON(ctx != table->table[ctx->id]); in kill_ioctx()
812 table->table[ctx->id] = NULL; in kill_ioctx()
816 wake_up_all(&ctx->wait); in kill_ioctx()
825 aio_nr_sub(ctx->max_reqs); in kill_ioctx()
827 if (ctx->mmap_size) in kill_ioctx()
828 vm_munmap(ctx->mmap_base, ctx->mmap_size); in kill_ioctx()
830 ctx->rq_wait = wait; in kill_ioctx()
831 percpu_ref_kill(&ctx->users); in kill_ioctx()
857 struct kioctx *ctx = table->table[i]; in exit_aio() local
859 if (!ctx) { in exit_aio()
871 ctx->mmap_size = 0; in exit_aio()
872 kill_ioctx(mm, ctx, &wait); in exit_aio()
884 static void put_reqs_available(struct kioctx *ctx, unsigned nr) in put_reqs_available() argument
890 kcpu = this_cpu_ptr(ctx->cpu); in put_reqs_available()
893 while (kcpu->reqs_available >= ctx->req_batch * 2) { in put_reqs_available()
894 kcpu->reqs_available -= ctx->req_batch; in put_reqs_available()
895 atomic_add(ctx->req_batch, &ctx->reqs_available); in put_reqs_available()
901 static bool get_reqs_available(struct kioctx *ctx) in get_reqs_available() argument
908 kcpu = this_cpu_ptr(ctx->cpu); in get_reqs_available()
910 int old, avail = atomic_read(&ctx->reqs_available); in get_reqs_available()
913 if (avail < ctx->req_batch) in get_reqs_available()
917 avail = atomic_cmpxchg(&ctx->reqs_available, in get_reqs_available()
918 avail, avail - ctx->req_batch); in get_reqs_available()
921 kcpu->reqs_available += ctx->req_batch; in get_reqs_available()
938 static void refill_reqs_available(struct kioctx *ctx, unsigned head, in refill_reqs_available() argument
944 head %= ctx->nr_events; in refill_reqs_available()
948 events_in_ring = ctx->nr_events - (head - tail); in refill_reqs_available()
950 completed = ctx->completed_events; in refill_reqs_available()
959 ctx->completed_events -= completed; in refill_reqs_available()
960 put_reqs_available(ctx, completed); in refill_reqs_available()
967 static void user_refill_reqs_available(struct kioctx *ctx) in user_refill_reqs_available() argument
969 spin_lock_irq(&ctx->completion_lock); in user_refill_reqs_available()
970 if (ctx->completed_events) { in user_refill_reqs_available()
983 ring = kmap_atomic(ctx->ring_pages[0]); in user_refill_reqs_available()
987 refill_reqs_available(ctx, head, ctx->tail); in user_refill_reqs_available()
990 spin_unlock_irq(&ctx->completion_lock); in user_refill_reqs_available()
997 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) in aio_get_req() argument
1001 if (!get_reqs_available(ctx)) { in aio_get_req()
1002 user_refill_reqs_available(ctx); in aio_get_req()
1003 if (!get_reqs_available(ctx)) in aio_get_req()
1011 percpu_ref_get(&ctx->reqs); in aio_get_req()
1013 req->ki_ctx = ctx; in aio_get_req()
1016 put_reqs_available(ctx, 1); in aio_get_req()
1033 struct kioctx *ctx, *ret = NULL; in lookup_ioctx() local
1046 ctx = table->table[id]; in lookup_ioctx()
1047 if (ctx && ctx->user_id == ctx_id) { in lookup_ioctx()
1048 percpu_ref_get(&ctx->users); in lookup_ioctx()
1049 ret = ctx; in lookup_ioctx()
1062 struct kioctx *ctx = iocb->ki_ctx; in aio_complete() local
1080 spin_lock_irqsave(&ctx->ctx_lock, flags); in aio_complete()
1082 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_complete()
1090 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_complete()
1092 tail = ctx->tail; in aio_complete()
1095 if (++tail >= ctx->nr_events) in aio_complete()
1098 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1107 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1110 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, in aio_complete()
1118 ctx->tail = tail; in aio_complete()
1120 ring = kmap_atomic(ctx->ring_pages[0]); in aio_complete()
1124 flush_dcache_page(ctx->ring_pages[0]); in aio_complete()
1126 ctx->completed_events++; in aio_complete()
1127 if (ctx->completed_events > 1) in aio_complete()
1128 refill_reqs_available(ctx, head, tail); in aio_complete()
1129 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_complete()
1152 if (waitqueue_active(&ctx->wait)) in aio_complete()
1153 wake_up(&ctx->wait); in aio_complete()
1155 percpu_ref_put(&ctx->reqs); in aio_complete()
1162 static long aio_read_events_ring(struct kioctx *ctx, in aio_read_events_ring() argument
1177 mutex_lock(&ctx->ring_lock); in aio_read_events_ring()
1180 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1191 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); in aio_read_events_ring()
1196 head %= ctx->nr_events; in aio_read_events_ring()
1197 tail %= ctx->nr_events; in aio_read_events_ring()
1204 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1213 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; in aio_read_events_ring()
1228 head %= ctx->nr_events; in aio_read_events_ring()
1231 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1234 flush_dcache_page(ctx->ring_pages[0]); in aio_read_events_ring()
1238 mutex_unlock(&ctx->ring_lock); in aio_read_events_ring()
1243 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, in aio_read_events() argument
1246 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); in aio_read_events()
1251 if (unlikely(atomic_read(&ctx->dead))) in aio_read_events()
1260 static long read_events(struct kioctx *ctx, long min_nr, long nr, in read_events() argument
1291 aio_read_events(ctx, min_nr, nr, event, &ret); in read_events()
1293 wait_event_interruptible_hrtimeout(ctx->wait, in read_events()
1294 aio_read_events(ctx, min_nr, nr, event, &ret), in read_events()
1319 unsigned long ctx; in SYSCALL_DEFINE2() local
1322 ret = get_user(ctx, ctxp); in SYSCALL_DEFINE2()
1327 if (unlikely(ctx || nr_events == 0)) { in SYSCALL_DEFINE2()
1329 ctx, nr_events); in SYSCALL_DEFINE2()
1352 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) in SYSCALL_DEFINE1() argument
1354 struct kioctx *ioctx = lookup_ioctx(ctx); in SYSCALL_DEFINE1()
1496 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, in io_submit_one() argument
1518 req = aio_get_req(ctx); in io_submit_one()
1566 put_reqs_available(ctx, 1); in io_submit_one()
1567 percpu_ref_put(&ctx->reqs); in io_submit_one()
1575 struct kioctx *ctx; in do_io_submit() local
1589 ctx = lookup_ioctx(ctx_id); in do_io_submit()
1590 if (unlikely(!ctx)) { in do_io_submit()
1615 ret = io_submit_one(ctx, user_iocb, &tmp, compat); in do_io_submit()
1621 percpu_ref_put(&ctx->users); in do_io_submit()
1647 lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) in lookup_kiocb() argument
1651 assert_spin_locked(&ctx->ctx_lock); in lookup_kiocb()
1657 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { in lookup_kiocb()
1677 struct kioctx *ctx; in SYSCALL_DEFINE3() local
1686 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
1687 if (unlikely(!ctx)) in SYSCALL_DEFINE3()
1690 spin_lock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
1692 kiocb = lookup_kiocb(ctx, iocb, key); in SYSCALL_DEFINE3()
1698 spin_unlock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
1709 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
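
All of the kioctx references above are exercised from userspace through the Linux native AIO system calls: io_setup() reaches ioctx_alloc()/aio_setup_ring(), io_submit() reaches do_io_submit()/io_submit_one()/aio_get_req(), io_getevents() reaches read_events()/aio_read_events_ring(), and io_destroy() reaches kill_ioctx(). The sketch below is a minimal illustrative driver for those paths, not part of fs/aio.c; glibc does not wrap these calls, so it invokes them via syscall(2). The wrapper names, the 128-entry queue depth, and the /etc/hostname test file are arbitrary choices for the example.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/aio_abi.h>      /* aio_context_t, struct iocb, struct io_event */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

/* Thin wrappers; glibc does not export these syscalls. */
static long io_setup(unsigned nr, aio_context_t *ctxp)
{
        return syscall(SYS_io_setup, nr, ctxp);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
        return syscall(SYS_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx, long min_nr, long max_nr,
                         struct io_event *events, struct timespec *timeout)
{
        return syscall(SYS_io_getevents, ctx, min_nr, max_nr, events, timeout);
}
static long io_destroy(aio_context_t ctx)
{
        return syscall(SYS_io_destroy, ctx);
}

int main(void)
{
        aio_context_t ctx = 0;          /* must be zero before io_setup() */
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        char buf[4096];
        int fd;

        /* ioctx_alloc() + aio_setup_ring(); ctx becomes ctx->user_id */
        if (io_setup(128, &ctx) < 0) {
                perror("io_setup");
                return 1;
        }

        fd = open("/etc/hostname", O_RDONLY);   /* arbitrary test file */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_buf = (__u64)(unsigned long)buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_offset = 0;

        /* do_io_submit() -> io_submit_one() -> aio_get_req() */
        if (io_submit(ctx, 1, cbs) != 1)
                perror("io_submit");
        /* read_events() -> aio_read_events_ring(); completion filled in by aio_complete() */
        else if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
                printf("read %lld bytes\n", (long long)ev.res);

        io_destroy(ctx);                /* sys_io_destroy -> kill_ioctx() */
        close(fd);
        return 0;
}

The aio_context_t handle returned through *ctxp is ctx->user_id, i.e. the userspace address of the ring mapped in aio_setup_ring() (ctx->user_id = ctx->mmap_base above); lookup_ioctx() resolves it back to the kioctx by comparing it against ctx->user_id on every subsequent io_submit/io_getevents/io_cancel call.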