root/ipc/mqueue.c

DEFINITIONS

This source file includes the following definitions.
  1. MQUEUE_I
  2. __get_ns_from_inode
  3. get_ns_from_inode
  4. msg_insert
  5. msg_tree_erase
  6. msg_get
  7. mqueue_get_inode
  8. mqueue_fill_super
  9. mqueue_get_tree
  10. mqueue_fs_context_free
  11. mqueue_init_fs_context
  12. mq_create_mount
  13. init_once
  14. mqueue_alloc_inode
  15. mqueue_free_inode
  16. mqueue_evict_inode
  17. mqueue_create_attr
  18. mqueue_create
  19. mqueue_unlink
  20. mqueue_read_file
  21. mqueue_flush_file
  22. mqueue_poll_file
  23. wq_add
  24. wq_sleep
  25. wq_get_first_waiter
  26. set_cookie
  27. __do_notify
  28. prepare_timeout
  29. remove_notification
  30. prepare_open
  31. do_mq_open
  32. SYSCALL_DEFINE4
  33. SYSCALL_DEFINE1
  34. pipelined_send
  35. pipelined_receive
  36. do_mq_timedsend
  37. do_mq_timedreceive
  38. SYSCALL_DEFINE5
  39. SYSCALL_DEFINE5
  40. do_mq_notify
  41. SYSCALL_DEFINE2
  42. do_mq_getsetattr
  43. SYSCALL_DEFINE3
  44. get_compat_mq_attr
  45. put_compat_mq_attr
  46. COMPAT_SYSCALL_DEFINE4
  47. COMPAT_SYSCALL_DEFINE2
  48. COMPAT_SYSCALL_DEFINE3
  49. compat_prepare_timeout
  50. SYSCALL_DEFINE5
  51. SYSCALL_DEFINE5
  52. mq_init_ns
  53. mq_clear_sbinfo
  54. mq_put_mnt
  55. init_mqueue_fs

/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
        struct ipc_namespace    *ipc_ns;
};

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1

struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct rb_node *msg_tree_rightmost;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}

/* Auxiliary functions to manipulate the per-priority message lists */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        bool rightmost = true;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority) {
                        p = &(*p)->rb_left;
                        rightmost = false;
                } else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;

        if (rightmost)
                info->msg_tree_rightmost = &leaf->rb_node;

        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
                                  struct mqueue_inode_info *info)
{
        struct rb_node *node = &leaf->rb_node;

        if (info->msg_tree_rightmost == node)
                info->msg_tree_rightmost = rb_prev(node);

        rb_erase(node, &info->msg_tree);
        if (info->node_cache) {
                kfree(leaf);
        } else {
                info->node_cache = leaf;
        }
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        /*
         * During insert, low priorities go to the left and high to the
         * right.  On receive, we want the highest priorities first, so
         * walk all the way to the right.
         */
        parent = info->msg_tree_rightmost;
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                msg_tree_erase(leaf, info);
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        msg_tree_erase(leaf, info);
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
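
/*
 * Illustrative example (not kernel code): the tree above keeps one leaf
 * per priority in use, with a FIFO list of messages hanging off each
 * leaf.  Given a queue that receives mq_send()s with priorities
 * 1, 5, 5, 2 (in that order), msg_get() hands the messages back as
 *
 *      prio 5 (first one sent), prio 5 (second one), prio 2, prio 1
 *
 * i.e. highest priority first, FIFO within equal priority, which is the
 * ordering POSIX requires for mq_receive().
 */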

static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct user_struct *u = current_user();
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->msg_tree_rightmost = NULL;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */
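
                /*
                 * Worked example (illustrative only; struct sizes vary by
                 * architecture and config): with mq_maxmsg = 10 and
                 * mq_msgsize = 8192, and assuming sizeof(struct msg_msg)
                 * == 48 and sizeof(struct posix_msg_tree_node) == 48 on a
                 * 64-bit build:
                 *
                 *   mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 48
                 *               = 480 + 480       = 960 bytes
                 *   mq_bytes    = 10 * 8192 + 960 = 82880 bytes
                 *
                 * and those 82880 bytes are what gets charged against the
                 * caller's RLIMIT_MSGQUEUE below.
                 */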

                ret = -EINVAL;
                if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
                        goto out_inode;
                if (capable(CAP_SYS_RESOURCE)) {
                        if (info->attr.mq_maxmsg > HARD_MSGMAX ||
                            info->attr.mq_msgsize > HARD_MSGSIZEMAX)
                                goto out_inode;
                } else {
                        if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
                                        info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
                                goto out_inode;
                }
                ret = -EOVERFLOW;
                /* check for overflow */
                if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
                        goto out_inode;
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);
                mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
                if (mq_bytes + mq_treesize < mq_bytes)
                        goto out_inode;
                mq_bytes += mq_treesize;
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
                        spin_unlock(&mq_lock);
                        /* mqueue_evict_inode() releases the queued messages */
                        ret = -EMFILE;
                        goto out_inode;
                }
                u->mq_bytes += mq_bytes;
                spin_unlock(&mq_lock);

                /* all is ok */
                info->user = get_uid(u);
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct inode *inode;
        struct ipc_namespace *ns = sb->s_fs_info;

        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx = fc->fs_private;

        put_ipc_ns(ctx->ipc_ns);
        kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
        struct mqueue_fs_context *ctx;

        ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
        fc->fs_private = ctx;
        fc->ops = &mqueue_fs_context_ops;
        return 0;
}

static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
        struct mqueue_fs_context *ctx;
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc))
                return ERR_CAST(fc);

        ctx = fc->fs_private;
        put_ipc_ns(ctx->ipc_ns);
        ctx->ipc_ns = get_ipc_ns(ns);
        put_user_ns(fc->user_ns);
        fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

        mnt = fc_mount(fc);
        put_fs_context(fc);
        return mnt;
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct user_struct *user;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg, *nmsg;
        LIST_HEAD(tmp_msg);

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                list_add_tail(&msg->m_list, &tmp_msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
                list_del(&msg->m_list);
                free_msg(msg);
        }

        user = info->user;
        if (user) {
                unsigned long mq_bytes, mq_treesize;

                /* Total amount of bytes accounted for the mqueue */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                free_uid(user);
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode;
        struct mq_attr *attr = arg;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
                                umode_t mode, bool excl)
{
        return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This routine handles reads from a queue file (e.g. via cat(1) on the
 * mounted mqueue filesystem).  To avoid duplicating some sort of
 * mq_receive() here, we only allow reading the queue size and the
 * notification info -- the only values that are interesting from a
 * user's point of view and that aren't accessible through the standard
 * mq_* routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                        info->qsize,
                        info->notify_owner ? info->notify.sigev_notify : 0,
                        (info->notify_owner &&
                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                info->notify.sigev_signo : 0,
                        pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                strlen(buffer));
        if (ret <= 0)
                return ret;

        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
        return ret;
}
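
/*
 * Example (illustrative; assumes mqueue is mounted at /dev/mqueue and a
 * queue named /foo exists):
 *
 *      $ cat /dev/mqueue/foo
 *      QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 *
 * i.e. 129 bytes currently queued, and a SIGEV_THREAD (2) notification
 * registered by PID 8260.
 */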

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        __poll_t retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = EPOLLIN | EPOLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}
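
/*
 * Example (illustrative, and Linux-specific: POSIX does not promise that
 * an mqd_t can be polled, but on Linux it is a file descriptor backed by
 * this file_operations struct):
 *
 *      struct pollfd pfd = { .fd = (int) mqdes, .events = POLLIN };
 *      if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *              ;       // mq_receive() will not block now
 */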

/* Add current to info->e_wait_q[sr] before the first element with a smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                        struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->prio <= current->prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * it is dropped before return.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
        __releases(&info->lock)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns the waiting task that should be serviced first, or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from getting too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* notification
         * is invoked when there is a registered process, no process is
         * waiting synchronously for a message, AND the state of the queue
         * changed from empty to not empty.  Here we are sure that no one
         * is waiting synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL: {
                        struct kernel_siginfo sig_i;
                        struct task_struct *task;

                        /* do_mq_notify() accepts sigev_signo == 0, why?? */
                        if (!info->notify.sigev_signo)
                                break;

                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        rcu_read_lock();
                        /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
                                                current_uid());
                        /*
                         * We can't use kill_pid_info(), this signal should
                         * bypass check_kill_permission(). It is from kernel
                         * but si_fromuser() can't know this.
                         * We do check the self_exec_id, to avoid sending
                         * signals to programs that don't expect them.
                         */
                        task = pid_task(info->notify_owner, PIDTYPE_TGID);
                        if (task && task->self_exec_id ==
                                                info->notify_self_exec_id) {
                                do_send_sig_info(info->notify.sigev_signo,
                                                &sig_i, task, PIDTYPE_TGID);
                        }
                        rcu_read_unlock();
                        break;
                }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after notification unregisters process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}
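
/*
 * Userspace view of the above (illustrative only): a process registers
 * with mq_notify() and is notified once, when the queue goes from empty
 * to non-empty; the registration is then consumed and must be re-armed
 * after draining the queue.
 *
 *      struct sigevent sev = {
 *              .sigev_notify = SIGEV_SIGNAL,
 *              .sigev_signo  = SIGUSR1,
 *      };
 *      mq_notify(mqdes, &sev);         // arm
 *      ...                             // SIGUSR1 arrives on empty->non-empty
 *      while (mq_receive(mqdes, buf, len, NULL) >= 0)
 *              ;                       // drain, then re-arm
 *      mq_notify(mqdes, &sev);
 */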

static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
                           struct timespec64 *ts)
{
        if (get_timespec64(ts, u_abs_timeout))
                return -EFAULT;
        if (!timespec64_valid(ts))
                return -EINVAL;
        return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int prepare_open(struct dentry *dentry, int oflag, int ro,
                        umode_t mode, struct filename *name,
                        struct mq_attr *attr)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;

        if (d_really_is_negative(dentry)) {
                if (!(oflag & O_CREAT))
                        return -ENOENT;
                if (ro)
                        return ro;
                audit_inode_parent_hidden(name, dentry->d_parent);
                return vfs_mkobj(dentry, mode & ~current_umask(),
                                  mqueue_create_attr, attr);
        }
        /* it already existed */
        audit_inode(name, dentry, 0);
        if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                return -EEXIST;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return -EINVAL;
        acc = oflag2acc[oflag & O_ACCMODE];
        return inode_permission(d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
                      struct mq_attr *attr)
{
        struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        struct filename *name;
        struct path path;
        int fd, error;
        int ro;

        audit_mq_open(oflag, mode, attr);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        inode_lock(d_inode(root));
        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);
        error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
        if (!error) {
                struct file *file = dentry_open(&path, oflag, current_cred());
                if (!IS_ERR(file))
                        fd_install(fd, file);
                else
                        error = PTR_ERR(file);
        }
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct mq_attr attr;
        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
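
/*
 * Userspace usage sketch (illustrative): queue names are "/name"-style,
 * and the attr argument only takes effect when O_CREAT actually creates
 * the queue.
 *
 *      struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *      mqd_t q = mq_open("/myq", O_CREAT | O_RDWR, 0600, &attr);
 *      if (q == (mqd_t) -1)
 *              perror("mq_open");
 */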

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_one_len(name->name, mnt->mnt_root,
                                strlen(name->name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        if (inode)
                iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */
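
/*
 * Illustrative timeline of the handoff described above (sender S, receiver
 * R already asleep in wq_sleep()):
 *
 *      S: spin_lock(&info->lock)
 *      S: receiver = wq_get_first_waiter(info, RECV)
 *      S: pipelined_send(): receiver->msg = message;
 *                           wake_q_add(); receiver->state = STATE_READY
 *      S: spin_unlock(&info->lock); wake_up_q()
 *      R: wakes (possibly early, from a timer or signal), sees STATE_READY
 *         and returns 0 from wq_sleep() without retaking the lock
 *
 * The message never touches the rbtree on this path.
 */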

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        list_del(&receiver->list);
        wake_q_add(wake_q, receiver->task);
        /*
         * Rely on the implicit cmpxchg barrier from wake_q_add such
         * that we can ensure that updating receiver->state is the last
         * write operation: As once set, the receiver can continue,
         * and if we don't have the reference count from the wake_q,
         * yet, at that point we can later have a use-after-free
         * condition and bogus wakeup.
         */
        receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed to
 * be room for it). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        list_del(&sender->list);
        wake_q_add(wake_q, sender->task);
        sender->state = STATE_READY;
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
                size_t msg_len, unsigned int msg_prio,
                struct timespec64 *ts)
{
        struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        DEFINE_WAKE_Q(wake_q);

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fdput(f);
out:
        return ret;
}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
                size_t msg_len, unsigned int __user *u_msg_prio,
                struct timespec64 *ts)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (ts) {
                expires = timespec64_to_ktime(*ts);
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, ts);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                DEFINE_WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                current_time(inode);

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fdput(f);
out:
        return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}
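
/*
 * Note that u_abs_timeout is an absolute CLOCK_REALTIME time (see the
 * HRTIMER_MODE_ABS/CLOCK_REALTIME pair in wq_sleep()).  A caller wanting
 * "block for at most 5 seconds" therefore does something like:
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_REALTIME, &ts);
 *      ts.tv_sec += 5;
 *      mq_timedsend(mqdes, buf, len, prio, &ts);
 */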

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct __kernel_timespec __user *, u_abs_timeout)
{
        struct timespec64 ts, *p = NULL;
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &ts);
                if (res)
                        return res;
                p = &ts;
        }
        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Notes: the case where a user asks us to deregister (with a NULL
 * pointer) while not currently being the owner of the notification is
 * silently ignored.  This case is not explicitly defined by POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
        int ret;
        struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        audit_mq_notify(mqdes, notification);

        nc = NULL;
        sock = NULL;
        if (notification != NULL) {
                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
                             notification->sigev_notify != SIGEV_SIGNAL &&
                             notification->sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification->sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification->sigev_signo)) {
                        return -EINVAL;
                }
                if (notification->sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc)
                                return -ENOMEM;

                        if (copy_from_user(nc->data,
                                        notification->sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto free_skb;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        f = fdget(notification->sigev_signo);
                        if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(f.file);
                        fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                goto free_skb;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1) {
                                sock = NULL;
                                goto retry;
                        }
                        if (ret)
                                return ret;
                }
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = current_time(inode);
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification->sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        info->notify_self_exec_id = current->self_exec_id;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = current_time(inode);
        }
        spin_unlock(&info->lock);
out_fput:
        fdput(f);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        else
free_skb:
                dev_kfree_skb(nc);

        return ret;
}
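
/*
 * Background on the SIGEV_THREAD path above (a comment, not a normative
 * statement of the userspace ABI): in-kernel, SIGEV_THREAD is just "write
 * a cookie to a netlink socket".  The C library is expected to pass the
 * file descriptor of an AF_NETLINK socket in sigev_signo and a
 * NOTIFY_COOKIE_LEN-byte cookie in sigev_value.sival_ptr, then have a
 * helper thread read from that socket and run the user's notification
 * function when the NOTIFY_WOKENUP cookie arrives.  This is, for example,
 * how glibc implements SIGEV_THREAD for mq_notify().
 */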

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        struct sigevent n, *p = NULL;
        if (u_notification) {
                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
                        return -EFAULT;
                p = &n;
        }
        return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (new && (new->mq_flags & (~O_NONBLOCK)))
                return -EINVAL;

        f = fdget(mqdes);
        if (!f.file)
                return -EBADF;

        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                fdput(f);
                return -EBADF;
        }

        inode = file_inode(f.file);
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        if (old) {
                *old = info->attr;
                old->mq_flags = f.file->f_flags & O_NONBLOCK;
        }
        if (new) {
                audit_mq_getsetattr(mqdes, new);
                spin_lock(&f.file->f_lock);
                if (new->mq_flags & O_NONBLOCK)
                        f.file->f_flags |= O_NONBLOCK;
                else
                        f.file->f_flags &= ~O_NONBLOCK;
                spin_unlock(&f.file->f_lock);

                inode->i_atime = inode->i_ctime = current_time(inode);
        }

        spin_unlock(&info->lock);
        fdput(f);
        return 0;
}
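
/*
 * Usage sketch (illustrative): O_NONBLOCK is the only flag mq_setattr()
 * can change; mq_maxmsg and mq_msgsize are fixed at creation time, and
 * any other bits in mq_flags are rejected with -EINVAL above.
 *
 *      struct mq_attr attr = { .mq_flags = O_NONBLOCK }, old;
 *      mq_setattr(mqdes, &attr, &old);  // switch to non-blocking,
 *                                       // previous attributes in 'old'
 */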
1411 
1412 SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1413                 const struct mq_attr __user *, u_mqstat,
1414                 struct mq_attr __user *, u_omqstat)
1415 {
1416         int ret;
1417         struct mq_attr mqstat, omqstat;
1418         struct mq_attr *new = NULL, *old = NULL;
1419 
1420         if (u_mqstat) {
1421                 new = &mqstat;
1422                 if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1423                         return -EFAULT;
1424         }
1425         if (u_omqstat)
1426                 old = &omqstat;
1427 
1428         ret = do_mq_getsetattr(mqdes, new, old);
1429         if (ret || !old)
1430                 return ret;
1431 
1432         if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1433                 return -EFAULT;
1434         return 0;
1435 }
1436 
1437 #ifdef CONFIG_COMPAT
1438 
1439 struct compat_mq_attr {
1440         compat_long_t mq_flags;      /* message queue flags                  */
1441         compat_long_t mq_maxmsg;     /* maximum number of messages           */
1442         compat_long_t mq_msgsize;    /* maximum message size                 */
1443         compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1444         compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1445 };
1446 
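/*
 * Conversion helpers for 32-bit tasks on a 64-bit kernel: struct
 * compat_mq_attr carries compat_long_t fields, so each attribute is
 * copied field by field, and the destination is zeroed first so padding
 * and the __reserved words never leak stale kernel stack contents.
 */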
1447 static inline int get_compat_mq_attr(struct mq_attr *attr,
1448                         const struct compat_mq_attr __user *uattr)
1449 {
1450         struct compat_mq_attr v;
1451 
1452         if (copy_from_user(&v, uattr, sizeof(*uattr)))
1453                 return -EFAULT;
1454 
1455         memset(attr, 0, sizeof(*attr));
1456         attr->mq_flags = v.mq_flags;
1457         attr->mq_maxmsg = v.mq_maxmsg;
1458         attr->mq_msgsize = v.mq_msgsize;
1459         attr->mq_curmsgs = v.mq_curmsgs;
1460         return 0;
1461 }
1462 
1463 static inline int put_compat_mq_attr(const struct mq_attr *attr,
1464                         struct compat_mq_attr __user *uattr)
1465 {
1466         struct compat_mq_attr v;
1467 
1468         memset(&v, 0, sizeof(v));
1469         v.mq_flags = attr->mq_flags;
1470         v.mq_maxmsg = attr->mq_maxmsg;
1471         v.mq_msgsize = attr->mq_msgsize;
1472         v.mq_curmsgs = attr->mq_curmsgs;
1473         if (copy_to_user(uattr, &v, sizeof(*uattr)))
1474                 return -EFAULT;
1475         return 0;
1476 }
1477 
1478 COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1479                        int, oflag, compat_mode_t, mode,
1480                        struct compat_mq_attr __user *, u_attr)
1481 {
1482         struct mq_attr attr, *p = NULL;
1483         if (u_attr && oflag & O_CREAT) {
1484                 p = &attr;
1485                 if (get_compat_mq_attr(&attr, u_attr))
1486                         return -EFAULT;
1487         }
1488         return do_mq_open(u_name, oflag, mode, p);
1489 }
1490 
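/*
 * Compat mq_notify(2): after get_compat_sigevent() has converted the
 * 32-bit sigevent, a SIGEV_THREAD registration still carries a userspace
 * pointer in sigev_value.  For a compat task that pointer arrives as a
 * 32-bit value in sival_int and must be rewidened with compat_ptr()
 * before do_mq_notify() dereferences it as sival_ptr.
 */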
1491 COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1492                        const struct compat_sigevent __user *, u_notification)
1493 {
1494         struct sigevent n, *p = NULL;
1495         if (u_notification) {
1496                 if (get_compat_sigevent(&n, u_notification))
1497                         return -EFAULT;
1498                 if (n.sigev_notify == SIGEV_THREAD)
1499                         n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1500                 p = &n;
1501         }
1502         return do_mq_notify(mqdes, p);
1503 }
1504 
1505 COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1506                        const struct compat_mq_attr __user *, u_mqstat,
1507                        struct compat_mq_attr __user *, u_omqstat)
1508 {
1509         int ret;
1510         struct mq_attr mqstat, omqstat;
1511         struct mq_attr *new = NULL, *old = NULL;
1512 
1513         if (u_mqstat) {
1514                 new = &mqstat;
1515                 if (get_compat_mq_attr(new, u_mqstat))
1516                         return -EFAULT;
1517         }
1518         if (u_omqstat)
1519                 old = &omqstat;
1520 
1521         ret = do_mq_getsetattr(mqdes, new, old);
1522         if (ret || !old)
1523                 return ret;
1524 
1525         if (put_compat_mq_attr(old, u_omqstat))
1526                 return -EFAULT;
1527         return 0;
1528 }
1529 #endif
1530 
1531 #ifdef CONFIG_COMPAT_32BIT_TIME
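/*
 * Legacy 32-bit time_t entry points: convert an old_timespec32 absolute
 * timeout into a timespec64 and validate it before handing off to the
 * common do_mq_timedsend()/do_mq_timedreceive() paths.
 */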
1532 static int compat_prepare_timeout(const struct old_timespec32 __user *p,
1533                                    struct timespec64 *ts)
1534 {
1535         if (get_old_timespec32(ts, p))
1536                 return -EFAULT;
1537         if (!timespec64_valid(ts))
1538                 return -EINVAL;
1539         return 0;
1540 }
1541 
1542 SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
1543                 const char __user *, u_msg_ptr,
1544                 unsigned int, msg_len, unsigned int, msg_prio,
1545                 const struct old_timespec32 __user *, u_abs_timeout)
1546 {
1547         struct timespec64 ts, *p = NULL;
1548         if (u_abs_timeout) {
1549                 int res = compat_prepare_timeout(u_abs_timeout, &ts);
1550                 if (res)
1551                         return res;
1552                 p = &ts;
1553         }
1554         return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1555 }
1556 
1557 SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
1558                 char __user *, u_msg_ptr,
1559                 unsigned int, msg_len, unsigned int __user *, u_msg_prio,
1560                 const struct old_timespec32 __user *, u_abs_timeout)
1561 {
1562         struct timespec64 ts, *p = NULL;
1563         if (u_abs_timeout) {
1564                 int res = compat_prepare_timeout(u_abs_timeout, &ts);
1565                 if (res)
1566                         return res;
1567                 p = &ts;
1568         }
1569         return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1570 }
1571 #endif
1572 
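/*
 * VFS wiring for the mqueue filesystem.  Directory inodes support only
 * lookup/create/unlink; queue files support read() for the textual
 * status line, poll() for send/receive readiness, and flush() so that a
 * close() by the notification owner tears down its registration.
 */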
1573 static const struct inode_operations mqueue_dir_inode_operations = {
1574         .lookup = simple_lookup,
1575         .create = mqueue_create,
1576         .unlink = mqueue_unlink,
1577 };
1578 
1579 static const struct file_operations mqueue_file_operations = {
1580         .flush = mqueue_flush_file,
1581         .poll = mqueue_poll_file,
1582         .read = mqueue_read_file,
1583         .llseek = default_llseek,
1584 };
1585 
1586 static const struct super_operations mqueue_super_ops = {
1587         .alloc_inode = mqueue_alloc_inode,
1588         .free_inode = mqueue_free_inode,
1589         .evict_inode = mqueue_evict_inode,
1590         .statfs = simple_statfs,
1591 };
1592 
1593 static const struct fs_context_operations mqueue_fs_context_ops = {
1594         .free           = mqueue_fs_context_free,
1595         .get_tree       = mqueue_get_tree,
1596 };
1597 
1598 static struct file_system_type mqueue_fs_type = {
1599         .name                   = "mqueue",
1600         .init_fs_context        = mqueue_init_fs_context,
1601         .kill_sb                = kill_litter_super,
1602         .fs_flags               = FS_USERNS_MOUNT,
1603 };
1604 
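/*
 * Per-namespace setup: seed the queue and message limits with the
 * compile-time defaults and create the namespace's internal mqueue
 * mount.  The limits correspond to the usual sysctl knobs (assuming a
 * standard procfs layout; listed here for illustration only):
 *
 *      /proc/sys/fs/mqueue/queues_max
 *      /proc/sys/fs/mqueue/msg_max
 *      /proc/sys/fs/mqueue/msgsize_max
 *      /proc/sys/fs/mqueue/msg_default
 *      /proc/sys/fs/mqueue/msgsize_default
 */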
1605 int mq_init_ns(struct ipc_namespace *ns)
1606 {
1607         struct vfsmount *m;
1608 
1609         ns->mq_queues_count  = 0;
1610         ns->mq_queues_max    = DFLT_QUEUESMAX;
1611         ns->mq_msg_max       = DFLT_MSGMAX;
1612         ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1613         ns->mq_msg_default   = DFLT_MSG;
1614         ns->mq_msgsize_default  = DFLT_MSGSIZE;
1615 
1616         m = mq_create_mount(ns);
1617         if (IS_ERR(m))
1618                 return PTR_ERR(m);
1619         ns->mq_mnt = m;
1620         return 0;
1621 }
1622 
1623 void mq_clear_sbinfo(struct ipc_namespace *ns)
1624 {
1625         ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1626 }
1627 
1628 void mq_put_mnt(struct ipc_namespace *ns)
1629 {
1630         kern_unmount(ns->mq_mnt);
1631 }
1632 
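/*
 * Module init: create the inode cache first (everything else depends on
 * it), register the sysctl table (failure there is tolerated), register
 * the filesystem type, and finally mount it for the initial ipc
 * namespace; errors unwind in reverse order.
 */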
1633 static int __init init_mqueue_fs(void)
1634 {
1635         int error;
1636 
1637         mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1638                                 sizeof(struct mqueue_inode_info), 0,
1639                                 SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1640         if (mqueue_inode_cachep == NULL)
1641                 return -ENOMEM;
1642 
1643         /* ignore failures - they are not fatal */
1644         mq_sysctl_table = mq_register_sysctl_table();
1645 
1646         error = register_filesystem(&mqueue_fs_type);
1647         if (error)
1648                 goto out_sysctl;
1649 
1650         spin_lock_init(&mq_lock);
1651 
1652         error = mq_init_ns(&init_ipc_ns);
1653         if (error)
1654                 goto out_filesystem;
1655 
1656         return 0;
1657 
1658 out_filesystem:
1659         unregister_filesystem(&mqueue_fs_type);
1660 out_sysctl:
1661         if (mq_sysctl_table)
1662                 unregister_sysctl_table(mq_sysctl_table);
1663         kmem_cache_destroy(mqueue_inode_cachep);
1664         return error;
1665 }
1666 
1667 device_initcall(init_mqueue_fs);
