Lines matching refs:nr_events (all in fs/aio.c)
110 unsigned nr_events; member
430 unsigned nr_events = ctx->max_reqs; in aio_setup_ring() local
438 nr_events += 2; /* 1 is required, 2 for good luck */ in aio_setup_ring()
441 size += sizeof(struct io_event) * nr_events; in aio_setup_ring()
454 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) in aio_setup_ring()
504 ctx->nr_events = nr_events; /* trusted copy */ in aio_setup_ring()
507 ring->nr = nr_events; /* user copy */ in aio_setup_ring()
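The aio_setup_ring() lines above size the completion ring: the caller's max_reqs is padded by two slots (line 438), the byte size is rounded up to whole pages together with the ring header, and line 454 then recomputes nr_events so every allocated byte holds a usable slot. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and stand-in sizes for struct aio_ring and struct io_event (the real values come from the kernel headers):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;   /* assumed page size */
	const unsigned long hdr = 32;           /* stand-in: sizeof(struct aio_ring) */
	const unsigned long ev  = 32;           /* stand-in: sizeof(struct io_event) */
	unsigned long nr_events = 128;          /* ctx->max_reqs */
	unsigned long size, nr_pages;

	nr_events += 2;                         /* line 438: 1 is required, 2 for good luck */
	size = hdr + ev * nr_events;            /* line 441 plus the header */
	nr_pages = (size + page_size - 1) / page_size;

	/* line 454: recompute nr_events from the rounded-up allocation */
	nr_events = (page_size * nr_pages - hdr) / ev;

	printf("nr_pages=%lu nr_events=%lu\n", nr_pages, nr_events);
	return 0;
}

With these stand-in sizes, a request for 128 events yields a two-page ring holding 255 slots. Line 504 keeps the recomputed value as the kernel's trusted copy, while line 507 publishes the same number to the user-mapped ring header.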
681 static struct kioctx *ioctx_alloc(unsigned nr_events) in ioctx_alloc() argument
696 nr_events = max(nr_events, num_possible_cpus() * 4); in ioctx_alloc()
697 nr_events *= 2; in ioctx_alloc()
700 if (nr_events > (0x10000000U / sizeof(struct io_event))) { in ioctx_alloc()
705 if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) in ioctx_alloc()
712 ctx->max_reqs = nr_events; in ioctx_alloc()
738 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); in ioctx_alloc()
739 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); in ioctx_alloc()
745 if (aio_nr + nr_events > (aio_max_nr * 2UL) || in ioctx_alloc()
746 aio_nr + nr_events < aio_nr) { in ioctx_alloc()
765 ctx, ctx->user_id, mm, ctx->nr_events); in ioctx_alloc()
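Before any of that, ioctx_alloc() rescales the caller's figure: lines 696-697 raise it to at least four slots per possible CPU and then double it, lines 700-705 and 745-746 bound the result against overflow and aio_max_nr, and line 739 derives the per-CPU batch size from the final ring capacity. A hedged sketch of the scaling, with num_possible_cpus() replaced by a constant:

#include <stdio.h>

#define NR_CPUS 8UL   /* stand-in for num_possible_cpus() */

int main(void)
{
	unsigned long nr_events = 8;   /* what userspace passed to io_setup() */
	unsigned long req_batch;

	if (nr_events < NR_CPUS * 4)   /* line 696: max(nr_events, cpus * 4) */
		nr_events = NR_CPUS * 4;
	nr_events *= 2;                /* line 697 */

	/*
	 * Line 739, using the pre-ring value for simplicity; the kernel
	 * uses ctx->nr_events as recomputed by aio_setup_ring().
	 */
	req_batch = (nr_events - 1) / (NR_CPUS * 4);

	printf("nr_events=%lu req_batch=%lu\n", nr_events, req_batch);
	return 0;
}

So even io_setup(8, ...) on an 8-CPU machine ends up requesting a 64-slot ring. Line 738 seeds reqs_available with nr_events - 1, presumably keeping one slot unused so that head == tail can unambiguously mean an empty ring.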
935 head %= ctx->nr_events; in refill_reqs_available()
939 events_in_ring = ctx->nr_events - (head - tail); in refill_reqs_available()
1086 if (++tail >= ctx->nr_events) in aio_complete()
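Lines 935-939 and 1086 are the two halves of the ring's index arithmetic: aio_complete() advances tail with an explicit wraparound, and refill_reqs_available() normalizes head modulo the ring size before counting how many completed events still sit in the ring. A self-contained sketch of both (the unwrapped branch of the occupancy count is inferred from the surrounding kernel code, not shown in the listing):

#include <stdio.h>

/* line 1086: the producer advances tail with explicit wraparound */
static unsigned advance_tail(unsigned tail, unsigned nr_events)
{
	if (++tail >= nr_events)
		tail = 0;
	return tail;
}

/* lines 935-939: completed events still occupying the ring */
static unsigned events_in_ring(unsigned head, unsigned tail, unsigned nr_events)
{
	head %= nr_events;                      /* line 935 */
	if (head <= tail)
		return tail - head;             /* no wrap */
	return nr_events - (head - tail);       /* line 939: wrapped */
}

int main(void)
{
	printf("tail=%u in_ring=%u\n",
	       advance_tail(254, 255),          /* wraps back to 0 */
	       events_in_ring(200, 10, 255));   /* wrapped: 65 events */
	return 0;
}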
1182 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); in aio_read_events_ring()
1187 head %= ctx->nr_events; in aio_read_events_ring()
1188 tail %= ctx->nr_events; in aio_read_events_ring()
1195 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1219 head %= ctx->nr_events; in aio_read_events_ring()
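On the consumer side, aio_read_events_ring() copies events out in contiguous runs: line 1195 ends each pass either at tail or at the physical end of the ring, whichever comes first, and line 1219 wraps head for the next pass. A small sketch of that loop, assuming in-range indices (in the kernel each run is additionally clamped to page boundaries before the copy to userspace):

#include <stdio.h>

static void drain(unsigned head, unsigned tail, unsigned nr_events)
{
	while (head != tail) {
		/* line 1195: length of the next contiguous run */
		unsigned avail = (head <= tail ? tail : nr_events) - head;

		printf("copy %u events starting at slot %u\n", avail, head);
		head = (head + avail) % nr_events;   /* line 1219 */
	}
}

int main(void)
{
	drain(250, 10, 255);   /* wrapped ring: copies 5 events, then 10 */
	return 0;
}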
1307 SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) in SYSCALL_DEFINE2() argument
1318 if (unlikely(ctx || nr_events == 0)) { in SYSCALL_DEFINE2()
1320 ctx, nr_events); in SYSCALL_DEFINE2()
1324 ioctx = ioctx_alloc(nr_events); in SYSCALL_DEFINE2()
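Lines 1307-1324 are the syscall entry that feeds all of the above: io_setup(2) rejects a zero nr_events or a nonzero value already stored at *ctxp (line 1318) before handing the count to ioctx_alloc(). A minimal raw-syscall usage example; real programs would more typically go through libaio:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

int main(void)
{
	aio_context_t ctx = 0;   /* must be zero, or the kernel returns -EINVAL */

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}
	printf("aio context: %#lx\n", (unsigned long)ctx);

	syscall(SYS_io_destroy, ctx);   /* tear the context back down */
	return 0;
}

Note that the 128 passed here is only a hint: as the ioctx_alloc() lines show, the kernel may round it up substantially before sizing the ring.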