user_bufs  239  fs/io_uring.c         struct io_mapped_ubuf *user_bufs;
user_bufs 1176  fs/io_uring.c         if (unlikely(!ctx->user_bufs))
user_bufs 1184  fs/io_uring.c         imu = &ctx->user_bufs[index];
user_bufs 3400  fs/io_uring.c         if (!ctx->user_bufs)
user_bufs 3404  fs/io_uring.c         struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
user_bufs 3415  fs/io_uring.c         kfree(ctx->user_bufs);
user_bufs 3416  fs/io_uring.c         ctx->user_bufs = NULL;
user_bufs 3454  fs/io_uring.c         if (ctx->user_bufs)
user_bufs 3459  fs/io_uring.c         ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
user_bufs 3461  fs/io_uring.c         if (!ctx->user_bufs)
user_bufs 3465  fs/io_uring.c         struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
user_bufs  632  fs/pipe.c             static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
user_bufs  636  fs/pipe.c             return soft_limit && user_bufs > soft_limit;
user_bufs  639  fs/pipe.c             static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
user_bufs  643  fs/pipe.c             return hard_limit && user_bufs > hard_limit;
user_bufs  656  fs/pipe.c             unsigned long user_bufs;
user_bufs  666  fs/pipe.c             user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
user_bufs  668  fs/pipe.c             if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
user_bufs  669  fs/pipe.c             user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
user_bufs  673  fs/pipe.c             if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
user_bufs 1058  fs/pipe.c             unsigned long user_bufs;
user_bufs 1078  fs/pipe.c             user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
user_bufs 1081  fs/pipe.c             (too_many_pipe_buffers_hard(user_bufs) ||
user_bufs 1082  fs/pipe.c             too_many_pipe_buffers_soft(user_bufs)) &&
user_bufs 1245  kernel/bpf/syscall.c  unsigned long user_bufs;
user_bufs 1248  kernel/bpf/syscall.c  user_bufs = atomic_long_add_return(pages, &user->locked_vm);
user_bufs 1249  kernel/bpf/syscall.c  if (user_bufs > memlock_limit) {
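
The fs/io_uring.c hits above trace a single lifecycle: ctx->user_bufs is a kcalloc'd array of struct io_mapped_ubuf guarded by NULL checks, indexed per fixed-buffer request, and freed and re-NULLed on unregister. Below is a minimal userspace sketch of that register/lookup/unregister shape; the ctx and io_mapped_ubuf definitions here are stripped-down stand-ins (the real struct io_mapped_ubuf also tracks the pinned user pages), so treat this as a model of the pattern, not the kernel code.

/* Userspace sketch of the ctx->user_bufs lifecycle visible in the
 * fs/io_uring.c hits above.  ctx and io_mapped_ubuf are stand-ins. */
#include <stdlib.h>
#include <stdio.h>

struct io_mapped_ubuf {
    void   *ubuf;   /* stand-in for the pinned user address */
    size_t  len;
};

struct ctx {
    struct io_mapped_ubuf *user_bufs;  /* NULL means "nothing registered" */
    unsigned               nr_user_bufs;
};

/* Register: mirrors the "already registered" guard at io_uring.c:3454
 * and the kcalloc at :3459. */
static int register_bufs(struct ctx *ctx, unsigned nr_args)
{
    if (ctx->user_bufs)
        return -1;                      /* kernel rejects double registration (-EBUSY) */
    ctx->user_bufs = calloc(nr_args, sizeof(struct io_mapped_ubuf));
    if (!ctx->user_bufs)
        return -1;                      /* kernel returns -ENOMEM */
    ctx->nr_user_bufs = nr_args;
    return 0;
}

/* Lookup: mirrors the NULL check at :1176 and the index at :1184. */
static struct io_mapped_ubuf *lookup_buf(struct ctx *ctx, unsigned index)
{
    if (!ctx->user_bufs || index >= ctx->nr_user_bufs)
        return NULL;
    return &ctx->user_bufs[index];
}

/* Unregister: mirrors the kfree/NULL pair at :3415-3416, so a later
 * register_bufs() sees a clean slate. */
static void unregister_bufs(struct ctx *ctx)
{
    free(ctx->user_bufs);
    ctx->user_bufs = NULL;
    ctx->nr_user_bufs = 0;
}

int main(void)
{
    struct ctx ctx = { 0 };
    if (register_bufs(&ctx, 4) == 0 && lookup_buf(&ctx, 2))
        puts("buffer 2 registered and found");
    unregister_bufs(&ctx);
    return 0;
}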
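
The fs/pipe.c hits show the per-user accounting pattern used when a pipe is created or resized: account_pipe_buffers() atomically charges a delta against the owner's counter and returns the new total, which the caller tests against the soft and hard limits, falling back to a single buffer or reverting the whole charge for unprivileged users. A runnable userspace model of the creation-time flow follows; C11 atomics stand in for the kernel's atomic_long_add_return(), and the fixed constants stand in for the pipe_user_pages_soft/pipe_user_pages_hard sysctls.

/* Userspace model of the fs/pipe.c accounting above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long per_user_pipe_bufs;          /* models user->pipe_bufs      */
static const unsigned long soft_limit = 16384;  /* models pipe_user_pages_soft */
static const unsigned long hard_limit = 65536;  /* models pipe_user_pages_hard */

/* add_return semantics: charge (new - old) and return the resulting total,
 * so the limit checks below see one consistent snapshot even if other
 * threads are charging concurrently. */
static unsigned long account_pipe_buffers(unsigned long old, unsigned long new)
{
    long delta = (long)new - (long)old;
    return (unsigned long)(atomic_fetch_add(&per_user_pipe_bufs, delta) + delta);
}

static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
{
    return soft_limit && user_bufs > soft_limit;
}

static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
{
    return hard_limit && user_bufs > hard_limit;
}

/* Models the flow at pipe.c:656-673: charge a full ring optimistically,
 * drop to one buffer if the soft limit trips for an unprivileged user,
 * and revert the entire charge if the hard limit still trips. */
static int charge_new_pipe(bool unprivileged, unsigned long *nr_bufs)
{
    unsigned long pipe_bufs = 16;   /* default ring size */
    unsigned long user_bufs = account_pipe_buffers(0, pipe_bufs);

    if (too_many_pipe_buffers_soft(user_bufs) && unprivileged) {
        user_bufs = account_pipe_buffers(pipe_bufs, 1);
        pipe_bufs = 1;
    }
    if (too_many_pipe_buffers_hard(user_bufs) && unprivileged) {
        account_pipe_buffers(pipe_bufs, 0);   /* uncharge everything */
        return -1;                            /* kernel frees the pipe and fails */
    }
    *nr_bufs = pipe_bufs;
    return 0;
}

int main(void)
{
    unsigned long n;
    if (charge_new_pipe(true, &n) == 0)
        printf("charged %lu bufs; per-user total now %ld\n",
               n, atomic_load(&per_user_pipe_bufs));
    return 0;
}

The charge-then-check-then-revert order matters: checking first and adding afterwards would open a window in which two concurrent creators both pass the check and together blow past the limit.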
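
The kernel/bpf/syscall.c hits apply the same charge-and-check idea to RLIMIT_MEMLOCK: atomic_long_add_return() charges pages against user->locked_vm, and the charge is immediately backed out if the new total exceeds the limit, so a failed charge leaves the counter untouched. A small sketch of that charge/uncharge pair, with C11 atomics again standing in for the kernel primitives and a fixed page budget standing in for the computed rlimit:

/* Charge/uncharge model of the bpf memlock accounting above.  The
 * memlock_limit here is a fixed stand-in for the per-task
 * RLIMIT_MEMLOCK value the kernel computes. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long locked_vm;                    /* models user->locked_vm        */
static const unsigned long memlock_limit = 2048; /* pages; stand-in for the rlimit */

static int charge_memlock(unsigned long pages)
{
    unsigned long user_bufs =
        (unsigned long)(atomic_fetch_add(&locked_vm, (long)pages) + (long)pages);

    if (user_bufs > memlock_limit) {
        /* Over the limit: back the charge out before failing, so a failed
         * charge leaves the counter unchanged. */
        atomic_fetch_sub(&locked_vm, (long)pages);
        return -1;   /* the kernel fails with -EPERM here */
    }
    return 0;
}

static void uncharge_memlock(unsigned long pages)
{
    atomic_fetch_sub(&locked_vm, (long)pages);
}

int main(void)
{
    if (charge_memlock(1024) == 0)
        printf("charged, locked_vm=%ld pages\n", atomic_load(&locked_vm));
    if (charge_memlock(4096) != 0)
        puts("second charge rejected and rolled back");
    uncharge_memlock(1024);
    return 0;
}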