kernel/seccomp.c

DEFINITIONS

This source file includes the following definitions.
  1. seccomp_check_filter
  2. seccomp_run_filters
  3. seccomp_may_assign_mode
  4. arch_seccomp_spec_mitigate
  5. seccomp_assign_mode
  6. is_ancestor
  7. seccomp_can_sync_threads
  8. seccomp_sync_threads
  9. seccomp_prepare_filter
  10. seccomp_prepare_user_filter
  11. seccomp_attach_filter
  12. __get_seccomp_filter
  13. get_seccomp_filter
  14. seccomp_filter_free
  15. __put_seccomp_filter
  16. put_seccomp_filter
  17. seccomp_init_siginfo
  18. seccomp_send_sigsys
  19. seccomp_log
  20. __secure_computing_strict
  21. secure_computing_strict
  22. seccomp_next_notify_id
  23. seccomp_do_user_notification
  24. __seccomp_filter
  25. __seccomp_filter
  26. __secure_computing
  27. prctl_get_seccomp
  28. seccomp_set_mode_strict
  29. seccomp_notify_release
  30. seccomp_notify_recv
  31. seccomp_notify_send
  32. seccomp_notify_id_valid
  33. seccomp_notify_ioctl
  34. seccomp_notify_poll
  35. init_listener
  36. seccomp_set_mode_filter
  37. seccomp_set_mode_filter
  38. seccomp_get_action_avail
  39. seccomp_get_notif_sizes
  40. do_seccomp
  41. SYSCALL_DEFINE3
  42. prctl_set_seccomp
  43. get_nth_filter
  44. seccomp_get_filter
  45. seccomp_get_metadata
  46. seccomp_names_from_actions_logged
  47. seccomp_action_logged_from_name
  48. seccomp_actions_logged_from_names
  49. read_actions_logged
  50. write_actions_logged
  51. audit_actions_logged
  52. seccomp_actions_logged_handler
  53. seccomp_sysctl_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * linux/kernel/seccomp.c
   4  *
   5  * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
   6  *
   7  * Copyright (C) 2012 Google, Inc.
   8  * Will Drewry <wad@chromium.org>
   9  *
  10  * This defines a simple but solid secure-computing facility.
  11  *
  12  * Mode 1 uses a fixed list of allowed system calls.
  13  * Mode 2 allows user-defined system call filters in the form
  14  *        of Berkeley Packet Filters/Linux Socket Filters.
  15  */
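
/*
 * Editor's note: a minimal userspace sketch (not part of this file) of how
 * the two modes are entered; error handling is elided and the filter shown
 * is only illustrative:
 *
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/filter.h>
 *	#include <linux/seccomp.h>
 *
 *	// Mode 1: only read/write/exit/sigreturn remain allowed.
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 *	// Mode 2: kill the thread on getpid(2), allow everything else.
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_THREAD),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */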
  16 
  17 #include <linux/refcount.h>
  18 #include <linux/audit.h>
  19 #include <linux/compat.h>
  20 #include <linux/coredump.h>
  21 #include <linux/kmemleak.h>
  22 #include <linux/nospec.h>
  23 #include <linux/prctl.h>
  24 #include <linux/sched.h>
  25 #include <linux/sched/task_stack.h>
  26 #include <linux/seccomp.h>
  27 #include <linux/slab.h>
  28 #include <linux/syscalls.h>
  29 #include <linux/sysctl.h>
  30 
  31 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
  32 #include <asm/syscall.h>
  33 #endif
  34 
  35 #ifdef CONFIG_SECCOMP_FILTER
  36 #include <linux/file.h>
  37 #include <linux/filter.h>
  38 #include <linux/pid.h>
  39 #include <linux/ptrace.h>
  40 #include <linux/security.h>
  41 #include <linux/tracehook.h>
  42 #include <linux/uaccess.h>
  43 #include <linux/anon_inodes.h>
  44 
  45 enum notify_state {
  46         SECCOMP_NOTIFY_INIT,
  47         SECCOMP_NOTIFY_SENT,
  48         SECCOMP_NOTIFY_REPLIED,
  49 };
  50 
  51 struct seccomp_knotif {
  52         /* The struct pid of the task whose filter triggered the notification */
  53         struct task_struct *task;
  54 
  55         /* The "cookie" for this request; this is unique for this filter. */
  56         u64 id;
  57 
  58         /*
  59          * The seccomp data. This pointer is valid the entire time this
  60          * notification is active, since it comes from __seccomp_filter which
  61          * eclipses the entire lifecycle here.
  62          */
  63         const struct seccomp_data *data;
  64 
  65         /*
  66          * Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
  67          * struct seccomp_knotif is created and starts out in INIT. Once the
  68          * handler reads the notification off of an FD, it transitions to SENT.
  69          * If a signal is received the state transitions back to INIT and
  70          * another message is sent. When the userspace handler replies, state
  71          * transitions to REPLIED.
  72          */
  73         enum notify_state state;
  74 
  75         /* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
  76         int error;
  77         long val;
  78 
  79         /* Signals when this has entered SECCOMP_NOTIFY_REPLIED */
  80         struct completion ready;
  81 
  82         struct list_head list;
  83 };
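
/*
 * Editor's note: a sketch of the userspace half of the state machine
 * described above, assuming "fd" is a listener obtained with
 * SECCOMP_FILTER_FLAG_NEW_LISTENER (see init_listener() below).  The
 * request buffer must be zeroed, as enforced by seccomp_notify_recv():
 *
 *	struct seccomp_notif req;
 *	struct seccomp_notif_resp resp;
 *
 *	memset(&req, 0, sizeof(req));
 *	ioctl(fd, SECCOMP_IOCTL_NOTIF_RECV, &req);	// INIT -> SENT
 *	// ... emulate syscall req.data.nr on behalf of req.pid ...
 *	memset(&resp, 0, sizeof(resp));
 *	resp.id = req.id;
 *	resp.error = 0;
 *	resp.val = 0;
 *	ioctl(fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);	// SENT -> REPLIED
 */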
  84 
  85 /**
  86  * struct notification - container for seccomp userspace notifications. Since
  87  * most seccomp filters will not have notification listeners attached and this
  88  * structure is fairly large, we store the notification-specific stuff in a
  89  * separate structure.
  90  *
  91  * @request: A semaphore that users of this notification can wait on for
  92  *           changes. Actual reads and writes are still controlled with
  93  *           filter->notify_lock.
  94  * @next_id: The id of the next request.
  95  * @notifications: A list of struct seccomp_knotif elements.
  96  * @wqh: A wait queue for poll.
  97  */
  98 struct notification {
  99         struct semaphore request;
 100         u64 next_id;
 101         struct list_head notifications;
 102         wait_queue_head_t wqh;
 103 };
 104 
 105 /**
 106  * struct seccomp_filter - container for seccomp BPF programs
 107  *
 108  * @usage: reference count to manage the object lifetime.
 109  *         get/put helpers should be used when accessing an instance
 110  *         outside of a lifetime-guarded section.  In general, this
 111  *         is only needed for handling filters shared across tasks.
 112  * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 113  * @prev: points to a previously installed, or inherited, filter
 114  * @prog: the BPF program to evaluate
 115  * @notif: the struct that holds all notification related information
 116  * @notify_lock: A lock for all notification-related accesses.
 117  *
 118  * seccomp_filter objects are organized in a tree linked via the @prev
 119  * pointer.  For any task, it appears to be a singly-linked list starting
 120  * with current->seccomp.filter, the most recently attached or inherited filter.
 121  * However, multiple filters may share a @prev node, by way of fork(), which
 122  * results in a unidirectional tree existing in memory.  This is similar to
 123  * how namespaces work.
 124  *
 125  * seccomp_filter objects should never be modified after being attached
 126  * to a task_struct (other than @usage).
 127  */
 128 struct seccomp_filter {
 129         refcount_t usage;
 130         bool log;
 131         struct seccomp_filter *prev;
 132         struct bpf_prog *prog;
 133         struct notification *notif;
 134         struct mutex notify_lock;
 135 };
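
/*
 * Editor's note: an illustration of the tree described above.  If a task
 * attaches filter A, forks, and then parent and child each attach another
 * filter, memory holds:
 *
 *	parent: current->seccomp.filter -> B -> A -> NULL
 *	child:  current->seccomp.filter -> C -> A -> NULL
 *
 * A is shared and kept alive by @usage; each task still walks a plain
 * singly-linked list through @prev.
 */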
 136 
 137 /* Limit any path through the tree to 256KB worth of instructions. */
 138 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
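/*
 * Editor's note: struct sock_filter is 8 bytes (u16 code, u8 jt, u8 jf,
 * u32 k), so the limit works out to (1 << 18) / 8 = 32768 instructions,
 * including the 4-instruction per-filter penalty charged in
 * seccomp_attach_filter().
 */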
 139 
 140 /*
 141  * Endianness is explicitly ignored and left for BPF program authors to manage
 142  * as per the specific architecture.
 143  */
 144 static void populate_seccomp_data(struct seccomp_data *sd)
 145 {
 146         struct task_struct *task = current;
 147         struct pt_regs *regs = task_pt_regs(task);
 148         unsigned long args[6];
 149 
 150         sd->nr = syscall_get_nr(task, regs);
 151         sd->arch = syscall_get_arch(task);
 152         syscall_get_arguments(task, regs, args);
 153         sd->args[0] = args[0];
 154         sd->args[1] = args[1];
 155         sd->args[2] = args[2];
 156         sd->args[3] = args[3];
 157         sd->args[4] = args[4];
 158         sd->args[5] = args[5];
 159         sd->instruction_pointer = KSTK_EIP(task);
 160 }
 161 
 162 /**
 163  *      seccomp_check_filter - verify seccomp filter code
 164  *      @filter: filter to verify
 165  *      @flen: length of filter
 166  *
 167  * Takes a previously checked filter (by bpf_check_classic) and
 168  * redirects all filter code that loads struct sk_buff data
 169  * and related data through seccomp_bpf_load.  It also
 170  * enforces length and alignment checking of those loads.
 171  *
 172  * Returns 0 if the rule set is legal or -EINVAL if not.
 173  */
 174 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 175 {
 176         int pc;
 177         for (pc = 0; pc < flen; pc++) {
 178                 struct sock_filter *ftest = &filter[pc];
 179                 u16 code = ftest->code;
 180                 u32 k = ftest->k;
 181 
 182                 switch (code) {
 183                 case BPF_LD | BPF_W | BPF_ABS:
 184                         ftest->code = BPF_LDX | BPF_W | BPF_ABS;
 185                         /* 32-bit aligned and not out of bounds. */
 186                         if (k >= sizeof(struct seccomp_data) || k & 3)
 187                                 return -EINVAL;
 188                         continue;
 189                 case BPF_LD | BPF_W | BPF_LEN:
 190                         ftest->code = BPF_LD | BPF_IMM;
 191                         ftest->k = sizeof(struct seccomp_data);
 192                         continue;
 193                 case BPF_LDX | BPF_W | BPF_LEN:
 194                         ftest->code = BPF_LDX | BPF_IMM;
 195                         ftest->k = sizeof(struct seccomp_data);
 196                         continue;
 197                 /* Explicitly include allowed calls. */
 198                 case BPF_RET | BPF_K:
 199                 case BPF_RET | BPF_A:
 200                 case BPF_ALU | BPF_ADD | BPF_K:
 201                 case BPF_ALU | BPF_ADD | BPF_X:
 202                 case BPF_ALU | BPF_SUB | BPF_K:
 203                 case BPF_ALU | BPF_SUB | BPF_X:
 204                 case BPF_ALU | BPF_MUL | BPF_K:
 205                 case BPF_ALU | BPF_MUL | BPF_X:
 206                 case BPF_ALU | BPF_DIV | BPF_K:
 207                 case BPF_ALU | BPF_DIV | BPF_X:
 208                 case BPF_ALU | BPF_AND | BPF_K:
 209                 case BPF_ALU | BPF_AND | BPF_X:
 210                 case BPF_ALU | BPF_OR | BPF_K:
 211                 case BPF_ALU | BPF_OR | BPF_X:
 212                 case BPF_ALU | BPF_XOR | BPF_K:
 213                 case BPF_ALU | BPF_XOR | BPF_X:
 214                 case BPF_ALU | BPF_LSH | BPF_K:
 215                 case BPF_ALU | BPF_LSH | BPF_X:
 216                 case BPF_ALU | BPF_RSH | BPF_K:
 217                 case BPF_ALU | BPF_RSH | BPF_X:
 218                 case BPF_ALU | BPF_NEG:
 219                 case BPF_LD | BPF_IMM:
 220                 case BPF_LDX | BPF_IMM:
 221                 case BPF_MISC | BPF_TAX:
 222                 case BPF_MISC | BPF_TXA:
 223                 case BPF_LD | BPF_MEM:
 224                 case BPF_LDX | BPF_MEM:
 225                 case BPF_ST:
 226                 case BPF_STX:
 227                 case BPF_JMP | BPF_JA:
 228                 case BPF_JMP | BPF_JEQ | BPF_K:
 229                 case BPF_JMP | BPF_JEQ | BPF_X:
 230                 case BPF_JMP | BPF_JGE | BPF_K:
 231                 case BPF_JMP | BPF_JGE | BPF_X:
 232                 case BPF_JMP | BPF_JGT | BPF_K:
 233                 case BPF_JMP | BPF_JGT | BPF_X:
 234                 case BPF_JMP | BPF_JSET | BPF_K:
 235                 case BPF_JMP | BPF_JSET | BPF_X:
 236                         continue;
 237                 default:
 238                         return -EINVAL;
 239                 }
 240         }
 241         return 0;
 242 }
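
/*
 * Editor's note: the checks above restrict absolute loads to aligned
 * 32-bit words inside the 64-byte struct seccomp_data.  For example
 * (the offsets are fixed by the UAPI types):
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 0);	// nr
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 4);	// arch
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 16);	// low half of args[0] (LE)
 *
 * A load at k == 2 (unaligned) or k == 64 (out of bounds) makes the whole
 * filter fail with -EINVAL.
 */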
 243 
 244 /**
 245  * seccomp_run_filters - evaluates all seccomp filters against @sd
 246  * @sd: optional seccomp data to be passed to filters
 247  * @match: stores struct seccomp_filter that resulted in the return value,
 248  *         unless filter returned SECCOMP_RET_ALLOW, in which case it will
 249  *         be unchanged.
 250  *
 251  * Returns valid seccomp BPF response codes.
 252  */
 253 #define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
 254 static u32 seccomp_run_filters(const struct seccomp_data *sd,
 255                                struct seccomp_filter **match)
 256 {
 257         u32 ret = SECCOMP_RET_ALLOW;
 258         /* Make sure cross-thread synced filter points somewhere sane. */
 259         struct seccomp_filter *f =
 260                         READ_ONCE(current->seccomp.filter);
 261 
 262         /* Ensure unexpected behavior doesn't result in failing open. */
 263         if (WARN_ON(f == NULL))
 264                 return SECCOMP_RET_KILL_PROCESS;
 265 
 266         /*
 267          * All filters in the list are evaluated and the lowest BPF return
 268          * value always takes priority (ignoring the DATA).
 269          */
 270         preempt_disable();
 271         for (; f; f = f->prev) {
 272                 u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
 273 
 274                 if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
 275                         ret = cur_ret;
 276                         *match = f;
 277                 }
 278         }
 279         preempt_enable();
 280         return ret;
 281 }
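
/*
 * Editor's note: with the s32 cast in ACTION_ONLY(), the UAPI action
 * values order from strongest to weakest as:
 *
 *	SECCOMP_RET_KILL_PROCESS (0x80000000, negative as s32)
 *	SECCOMP_RET_KILL_THREAD  (0x00000000)
 *	SECCOMP_RET_TRAP         (0x00030000)
 *	SECCOMP_RET_ERRNO        (0x00050000)
 *	SECCOMP_RET_USER_NOTIF   (0x7fc00000)
 *	SECCOMP_RET_TRACE        (0x7ff00000)
 *	SECCOMP_RET_LOG          (0x7ffc0000)
 *	SECCOMP_RET_ALLOW        (0x7fff0000)
 *
 * E.g. if one filter in the list returns SECCOMP_RET_TRACE and an older
 * one returns SECCOMP_RET_ERRNO, the ERRNO action is the one applied.
 */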
 282 #endif /* CONFIG_SECCOMP_FILTER */
 283 
 284 static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
 285 {
 286         assert_spin_locked(&current->sighand->siglock);
 287 
 288         if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
 289                 return false;
 290 
 291         return true;
 292 }
 293 
 294 void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
 295 
 296 static inline void seccomp_assign_mode(struct task_struct *task,
 297                                        unsigned long seccomp_mode,
 298                                        unsigned long flags)
 299 {
 300         assert_spin_locked(&task->sighand->siglock);
 301 
 302         task->seccomp.mode = seccomp_mode;
 303         /*
 304          * Make sure TIF_SECCOMP cannot be set before the mode (and
 305          * filter) is set.
 306          */
 307         smp_mb__before_atomic();
 308         /* Assume default seccomp processes want spec flaw mitigation. */
 309         if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
 310                 arch_seccomp_spec_mitigate(task);
 311         set_tsk_thread_flag(task, TIF_SECCOMP);
 312 }
 313 
 314 #ifdef CONFIG_SECCOMP_FILTER
 315 /* Returns 1 if the parent is an ancestor of the child. */
 316 static int is_ancestor(struct seccomp_filter *parent,
 317                        struct seccomp_filter *child)
 318 {
 319         /* NULL is the root ancestor. */
 320         if (parent == NULL)
 321                 return 1;
 322         for (; child; child = child->prev)
 323                 if (child == parent)
 324                         return 1;
 325         return 0;
 326 }
 327 
 328 /**
 329  * seccomp_can_sync_threads: checks if all threads can be synchronized
 330  *
 331  * Expects sighand and cred_guard_mutex locks to be held.
 332  *
 333  * Returns 0 on success, -ve on error, or the pid of a thread which was
 334  * either not in the correct seccomp mode or did not have an ancestral
 335  * seccomp filter.
 336  */
 337 static inline pid_t seccomp_can_sync_threads(void)
 338 {
 339         struct task_struct *thread, *caller;
 340 
 341         BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
 342         assert_spin_locked(&current->sighand->siglock);
 343 
 344         /* Validate all threads being eligible for synchronization. */
 345         caller = current;
 346         for_each_thread(caller, thread) {
 347                 pid_t failed;
 348 
 349                 /* Skip current, since it is initiating the sync. */
 350                 if (thread == caller)
 351                         continue;
 352 
 353                 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
 354                     (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
 355                      is_ancestor(thread->seccomp.filter,
 356                                  caller->seccomp.filter)))
 357                         continue;
 358 
 359                 /* Return the first thread that cannot be synchronized. */
 360                 failed = task_pid_vnr(thread);
 361                 /* If the pid cannot be resolved, then return -ESRCH */
 362                 if (WARN_ON(failed == 0))
 363                         failed = -ESRCH;
 364                 return failed;
 365         }
 366 
 367         return 0;
 368 }
 369 
 370 /**
 371  * seccomp_sync_threads: sets all threads to use current's filter
 372  *
 373  * Expects sighand and cred_guard_mutex locks to be held, and for
 374  * seccomp_can_sync_threads() to have returned success already
 375  * without dropping the locks.
 376  *
 377  */
 378 static inline void seccomp_sync_threads(unsigned long flags)
 379 {
 380         struct task_struct *thread, *caller;
 381 
 382         BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
 383         assert_spin_locked(&current->sighand->siglock);
 384 
 385         /* Synchronize all threads. */
 386         caller = current;
 387         for_each_thread(caller, thread) {
 388                 /* Skip current, since it needs no changes. */
 389                 if (thread == caller)
 390                         continue;
 391 
 392                 /* Get a task reference for the new leaf node. */
 393                 get_seccomp_filter(caller);
 394                 /*
 395                  * Drop the task reference to the shared ancestor since
 396                  * current's path will hold a reference.  (This also
 397                  * allows a put before the assignment.)
 398                  */
 399                 put_seccomp_filter(thread);
 400                 smp_store_release(&thread->seccomp.filter,
 401                                   caller->seccomp.filter);
 402 
 403                 /*
 404                  * Don't let an unprivileged task work around
 405                  * the no_new_privs restriction by creating
 406                  * a thread that sets it up, enters seccomp,
 407                  * then dies.
 408                  */
 409                 if (task_no_new_privs(caller))
 410                         task_set_no_new_privs(thread);
 411 
 412                 /*
 413                  * Opt the other thread into seccomp if needed.
 414                  * As threads are considered to be trust-realm
 415                  * equivalent (see ptrace_may_access), it is safe to
 416                  * allow one thread to transition the other.
 417                  */
 418                 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
 419                         seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
 420                                             flags);
 421         }
 422 }
 423 
 424 /**
 425  * seccomp_prepare_filter: Prepares a seccomp filter for use.
 426  * @fprog: BPF program to install
 427  *
 428  * Returns filter on success or an ERR_PTR on failure.
 429  */
 430 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 431 {
 432         struct seccomp_filter *sfilter;
 433         int ret;
 434         const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
 435 
 436         if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
 437                 return ERR_PTR(-EINVAL);
 438 
 439         BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
 440 
 441         /*
 442          * Installing a seccomp filter requires that the task has
 443          * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
 444          * This avoids scenarios where unprivileged tasks can affect the
 445          * behavior of privileged children.
 446          */
 447         if (!task_no_new_privs(current) &&
 448             security_capable(current_cred(), current_user_ns(),
 449                                      CAP_SYS_ADMIN, CAP_OPT_NOAUDIT) != 0)
 450                 return ERR_PTR(-EACCES);
 451 
 452         /* Allocate a new seccomp_filter */
 453         sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
 454         if (!sfilter)
 455                 return ERR_PTR(-ENOMEM);
 456 
 457         mutex_init(&sfilter->notify_lock);
 458         ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
 459                                         seccomp_check_filter, save_orig);
 460         if (ret < 0) {
 461                 kfree(sfilter);
 462                 return ERR_PTR(ret);
 463         }
 464 
 465         refcount_set(&sfilter->usage, 1);
 466 
 467         return sfilter;
 468 }
 469 
 470 /**
 471  * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 472  * @user_filter: pointer to the user data containing a sock_fprog.
 473  *
 474  * Returns the prepared filter on success or an ERR_PTR on failure.
 475  */
 476 static struct seccomp_filter *
 477 seccomp_prepare_user_filter(const char __user *user_filter)
 478 {
 479         struct sock_fprog fprog;
 480         struct seccomp_filter *filter = ERR_PTR(-EFAULT);
 481 
 482 #ifdef CONFIG_COMPAT
 483         if (in_compat_syscall()) {
 484                 struct compat_sock_fprog fprog32;
 485                 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
 486                         goto out;
 487                 fprog.len = fprog32.len;
 488                 fprog.filter = compat_ptr(fprog32.filter);
 489         } else /* falls through to the if below. */
 490 #endif
 491         if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
 492                 goto out;
 493         filter = seccomp_prepare_filter(&fprog);
 494 out:
 495         return filter;
 496 }
 497 
 498 /**
 499  * seccomp_attach_filter: validate and attach filter
 500  * @flags:  flags to change filter behavior
 501  * @filter: seccomp filter to add to the current process
 502  *
 503  * Caller must be holding current->sighand->siglock lock.
 504  *
 505  * Returns 0 on success, -ve on error, or
 506  *   - in TSYNC mode: the pid of a thread which was either not in the correct
 507  *     seccomp mode or did not have an ancestral seccomp filter
 508  *   - in NEW_LISTENER mode: the fd of the new listener
 509  */
 510 static long seccomp_attach_filter(unsigned int flags,
 511                                   struct seccomp_filter *filter)
 512 {
 513         unsigned long total_insns;
 514         struct seccomp_filter *walker;
 515 
 516         assert_spin_locked(&current->sighand->siglock);
 517 
 518         /* Validate resulting filter length. */
 519         total_insns = filter->prog->len;
 520         for (walker = current->seccomp.filter; walker; walker = walker->prev)
 521                 total_insns += walker->prog->len + 4;  /* 4 instr penalty */
 522         if (total_insns > MAX_INSNS_PER_PATH)
 523                 return -ENOMEM;
 524 
 525         /* If thread sync has been requested, check that it is possible. */
 526         if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
 527                 int ret;
 528 
 529                 ret = seccomp_can_sync_threads();
 530                 if (ret)
 531                         return ret;
 532         }
 533 
 534         /* Set log flag, if present. */
 535         if (flags & SECCOMP_FILTER_FLAG_LOG)
 536                 filter->log = true;
 537 
 538         /*
 539          * If there is an existing filter, make it the prev and don't drop its
 540          * task reference.
 541          */
 542         filter->prev = current->seccomp.filter;
 543         current->seccomp.filter = filter;
 544 
 545         /* Now that the new filter is in place, synchronize to all threads. */
 546         if (flags & SECCOMP_FILTER_FLAG_TSYNC)
 547                 seccomp_sync_threads(flags);
 548 
 549         return 0;
 550 }
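
/*
 * Editor's note: a sketch of the TSYNC return convention documented
 * above, as seen from userspace (headers and error handling elided):
 *
 *	long ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *			   SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *	if (ret > 0)
 *		fprintf(stderr, "thread %ld blocks TSYNC\n", ret);
 *	else if (ret < 0)
 *		perror("seccomp");
 *	// ret == 0: the filter is now attached to every thread
 */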
 551 
 552 static void __get_seccomp_filter(struct seccomp_filter *filter)
 553 {
 554         refcount_inc(&filter->usage);
 555 }
 556 
 557 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
 558 void get_seccomp_filter(struct task_struct *tsk)
 559 {
 560         struct seccomp_filter *orig = tsk->seccomp.filter;
 561         if (!orig)
 562                 return;
 563         __get_seccomp_filter(orig);
 564 }
 565 
 566 static inline void seccomp_filter_free(struct seccomp_filter *filter)
 567 {
 568         if (filter) {
 569                 bpf_prog_destroy(filter->prog);
 570                 kfree(filter);
 571         }
 572 }
 573 
 574 static void __put_seccomp_filter(struct seccomp_filter *orig)
 575 {
 576         /* Clean up single-reference branches iteratively. */
 577         while (orig && refcount_dec_and_test(&orig->usage)) {
 578                 struct seccomp_filter *freeme = orig;
 579                 orig = orig->prev;
 580                 seccomp_filter_free(freeme);
 581         }
 582 }
 583 
 584 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
 585 void put_seccomp_filter(struct task_struct *tsk)
 586 {
 587         __put_seccomp_filter(tsk->seccomp.filter);
 588 }
 589 
 590 static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
 591 {
 592         clear_siginfo(info);
 593         info->si_signo = SIGSYS;
 594         info->si_code = SYS_SECCOMP;
 595         info->si_call_addr = (void __user *)KSTK_EIP(current);
 596         info->si_errno = reason;
 597         info->si_arch = syscall_get_arch(current);
 598         info->si_syscall = syscall;
 599 }
 600 
 601 /**
 602  * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 603  * @syscall: syscall number to send to userland
 604  * @reason: filter-supplied reason code to send to userland (via si_errno)
 605  *
 606  * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 607  */
 608 static void seccomp_send_sigsys(int syscall, int reason)
 609 {
 610         struct kernel_siginfo info;
 611         seccomp_init_siginfo(&info, syscall, reason);
 612         force_sig_info(&info);
 613 }
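
/*
 * Editor's note: a sketch of the receiving side for SECCOMP_RET_TRAP;
 * the siginfo fields mirror seccomp_init_siginfo() above:
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *uc)
 *	{
 *		// info->si_syscall: the trapped syscall number
 *		// info->si_errno:   the filter's SECCOMP_RET_DATA bits
 *		// info->si_arch:    the caller's AUDIT_ARCH_* value
 *	}
 *
 *	struct sigaction act = {
 *		.sa_sigaction = sigsys_handler,
 *		.sa_flags = SA_SIGINFO,
 *	};
 *	sigaction(SIGSYS, &act, NULL);
 */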
 614 #endif  /* CONFIG_SECCOMP_FILTER */
 615 
 616 /* For use with seccomp_actions_logged */
 617 #define SECCOMP_LOG_KILL_PROCESS        (1 << 0)
 618 #define SECCOMP_LOG_KILL_THREAD         (1 << 1)
 619 #define SECCOMP_LOG_TRAP                (1 << 2)
 620 #define SECCOMP_LOG_ERRNO               (1 << 3)
 621 #define SECCOMP_LOG_TRACE               (1 << 4)
 622 #define SECCOMP_LOG_LOG                 (1 << 5)
 623 #define SECCOMP_LOG_ALLOW               (1 << 6)
 624 #define SECCOMP_LOG_USER_NOTIF          (1 << 7)
 625 
 626 static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
 627                                     SECCOMP_LOG_KILL_THREAD  |
 628                                     SECCOMP_LOG_TRAP  |
 629                                     SECCOMP_LOG_ERRNO |
 630                                     SECCOMP_LOG_USER_NOTIF |
 631                                     SECCOMP_LOG_TRACE |
 632                                     SECCOMP_LOG_LOG;
 633 
 634 static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
 635                                bool requested)
 636 {
 637         bool log = false;
 638 
 639         switch (action) {
 640         case SECCOMP_RET_ALLOW:
 641                 break;
 642         case SECCOMP_RET_TRAP:
 643                 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
 644                 break;
 645         case SECCOMP_RET_ERRNO:
 646                 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
 647                 break;
 648         case SECCOMP_RET_TRACE:
 649                 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
 650                 break;
 651         case SECCOMP_RET_USER_NOTIF:
 652                 log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF;
 653                 break;
 654         case SECCOMP_RET_LOG:
 655                 log = seccomp_actions_logged & SECCOMP_LOG_LOG;
 656                 break;
 657         case SECCOMP_RET_KILL_THREAD:
 658                 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
 659                 break;
 660         case SECCOMP_RET_KILL_PROCESS:
 661         default:
 662                 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
 663         }
 664 
 665         /*
 666          * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
 667          * FILTER_FLAG_LOG bit was set. The admin has the ability to silence
 668          * any action from being logged by removing the action name from the
 669          * seccomp_actions_logged sysctl.
 670          */
 671         if (!log)
 672                 return;
 673 
 674         audit_seccomp(syscall, signr, action);
 675 }
 676 
 677 /*
 678  * Secure computing mode 1 allows only read/write/exit/sigreturn.
 679  * To be fully secure this must be combined with rlimit
 680  * to limit the stack allocations too.
 681  */
 682 static const int mode1_syscalls[] = {
 683         __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
 684         0, /* null terminated */
 685 };
 686 
 687 static void __secure_computing_strict(int this_syscall)
 688 {
 689         const int *syscall_whitelist = mode1_syscalls;
 690 #ifdef CONFIG_COMPAT
 691         if (in_compat_syscall())
 692                 syscall_whitelist = get_compat_mode1_syscalls();
 693 #endif
 694         do {
 695                 if (*syscall_whitelist == this_syscall)
 696                         return;
 697         } while (*++syscall_whitelist);
 698 
 699 #ifdef SECCOMP_DEBUG
 700         dump_stack();
 701 #endif
 702         seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
 703         do_exit(SIGKILL);
 704 }
 705 
 706 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
 707 void secure_computing_strict(int this_syscall)
 708 {
 709         int mode = current->seccomp.mode;
 710 
 711         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
 712             unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
 713                 return;
 714 
 715         if (mode == SECCOMP_MODE_DISABLED)
 716                 return;
 717         else if (mode == SECCOMP_MODE_STRICT)
 718                 __secure_computing_strict(this_syscall);
 719         else
 720                 BUG();
 721 }
 722 #else
 723 
 724 #ifdef CONFIG_SECCOMP_FILTER
 725 static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
 726 {
 727         /*
 728          * Note: overflow is ok here, the id just needs to be unique per
 729          * filter.
 730          */
 731         lockdep_assert_held(&filter->notify_lock);
 732         return filter->notif->next_id++;
 733 }
 734 
 735 static void seccomp_do_user_notification(int this_syscall,
 736                                          struct seccomp_filter *match,
 737                                          const struct seccomp_data *sd)
 738 {
 739         int err;
 740         long ret = 0;
 741         struct seccomp_knotif n = {};
 742 
 743         mutex_lock(&match->notify_lock);
 744         err = -ENOSYS;
 745         if (!match->notif)
 746                 goto out;
 747 
 748         n.task = current;
 749         n.state = SECCOMP_NOTIFY_INIT;
 750         n.data = sd;
 751         n.id = seccomp_next_notify_id(match);
 752         init_completion(&n.ready);
 753         list_add(&n.list, &match->notif->notifications);
 754 
 755         up(&match->notif->request);
 756         wake_up_poll(&match->notif->wqh, EPOLLIN | EPOLLRDNORM);
 757         mutex_unlock(&match->notify_lock);
 758 
 759         /*
 760          * This is where we wait for a reply from userspace.
 761          */
 762         err = wait_for_completion_interruptible(&n.ready);
 763         mutex_lock(&match->notify_lock);
 764         if (err == 0) {
 765                 ret = n.val;
 766                 err = n.error;
 767         }
 768 
 769         /*
 770          * Note that it's possible the listener died in between the time when
 771          * we were notified of a response (or a signal) and when we were able to
 772          * re-acquire the lock, so only delete from the list if the
 773          * notification actually exists.
 774          *
 775          * Also note that this test is only valid because there's no way to
 776          * *reattach* to a notifier right now. If one is added, we'll need to
 777          * keep track of the notif itself and make sure they match here.
 778          */
 779         if (match->notif)
 780                 list_del(&n.list);
 781 out:
 782         mutex_unlock(&match->notify_lock);
 783         syscall_set_return_value(current, task_pt_regs(current),
 784                                  err, ret);
 785 }
 786 
 787 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 788                             const bool recheck_after_trace)
 789 {
 790         u32 filter_ret, action;
 791         struct seccomp_filter *match = NULL;
 792         int data;
 793         struct seccomp_data sd_local;
 794 
 795         /*
 796          * Make sure that any changes to mode from another thread have
 797          * been seen after TIF_SECCOMP was seen.
 798          */
 799         rmb();
 800 
 801         if (!sd) {
 802                 populate_seccomp_data(&sd_local);
 803                 sd = &sd_local;
 804         }
 805 
 806         filter_ret = seccomp_run_filters(sd, &match);
 807         data = filter_ret & SECCOMP_RET_DATA;
 808         action = filter_ret & SECCOMP_RET_ACTION_FULL;
 809 
 810         switch (action) {
 811         case SECCOMP_RET_ERRNO:
 812                 /* Set low-order bits as an errno, capped at MAX_ERRNO. */
 813                 if (data > MAX_ERRNO)
 814                         data = MAX_ERRNO;
 815                 syscall_set_return_value(current, task_pt_regs(current),
 816                                          -data, 0);
 817                 goto skip;
 818 
 819         case SECCOMP_RET_TRAP:
 820                 /* Show the handler the original registers. */
 821                 syscall_rollback(current, task_pt_regs(current));
 822                 /* Let the filter pass back 16 bits of data. */
 823                 seccomp_send_sigsys(this_syscall, data);
 824                 goto skip;
 825 
 826         case SECCOMP_RET_TRACE:
 827                 /* We've been put in this state by the ptracer already. */
 828                 if (recheck_after_trace)
 829                         return 0;
 830 
 831                 /* ENOSYS these calls if there is no tracer attached. */
 832                 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
 833                         syscall_set_return_value(current,
 834                                                  task_pt_regs(current),
 835                                                  -ENOSYS, 0);
 836                         goto skip;
 837                 }
 838 
 839                 /* Allow the BPF to provide the event message */
 840                 ptrace_event(PTRACE_EVENT_SECCOMP, data);
 841                 /*
 842                  * The delivery of a fatal signal during event
 843                  * notification may silently skip tracer notification,
 844                  * which could leave us with a potentially unmodified
 845                  * syscall that the tracer would have liked to have
 846                  * changed. Since the process is about to die, we just
 847                  * force the syscall to be skipped and let the signal
 848                  * kill the process and correctly handle any tracer exit
 849                  * notifications.
 850                  */
 851                 if (fatal_signal_pending(current))
 852                         goto skip;
 853                 /* Check if the tracer forced the syscall to be skipped. */
 854                 this_syscall = syscall_get_nr(current, task_pt_regs(current));
 855                 if (this_syscall < 0)
 856                         goto skip;
 857 
 858                 /*
 859                  * Recheck the syscall, since it may have changed. This
 860                  * intentionally uses a NULL struct seccomp_data to force
 861                  * a reload of all registers. This does not goto skip since
 862                  * a skip would have already been reported.
 863                  */
 864                 if (__seccomp_filter(this_syscall, NULL, true))
 865                         return -1;
 866 
 867                 return 0;
 868 
 869         case SECCOMP_RET_USER_NOTIF:
 870                 seccomp_do_user_notification(this_syscall, match, sd);
 871                 goto skip;
 872 
 873         case SECCOMP_RET_LOG:
 874                 seccomp_log(this_syscall, 0, action, true);
 875                 return 0;
 876 
 877         case SECCOMP_RET_ALLOW:
 878                 /*
 879                  * Note that the "match" filter will always be NULL for
 880                  * this action since SECCOMP_RET_ALLOW is the starting
 881                  * state in seccomp_run_filters().
 882                  */
 883                 return 0;
 884 
 885         case SECCOMP_RET_KILL_THREAD:
 886         case SECCOMP_RET_KILL_PROCESS:
 887         default:
 888                 seccomp_log(this_syscall, SIGSYS, action, true);
 889                 /* Dump core only if this is the last remaining thread. */
 890                 if (action == SECCOMP_RET_KILL_PROCESS ||
 891                     get_nr_threads(current) == 1) {
 892                         kernel_siginfo_t info;
 893 
 894                         /* Show the original registers in the dump. */
 895                         syscall_rollback(current, task_pt_regs(current));
 896                         /* Trigger a manual coredump since do_exit skips it. */
 897                         seccomp_init_siginfo(&info, this_syscall, data);
 898                         do_coredump(&info);
 899                 }
 900                 if (action == SECCOMP_RET_KILL_PROCESS)
 901                         do_group_exit(SIGSYS);
 902                 else
 903                         do_exit(SIGSYS);
 904         }
 905 
 906         unreachable();
 907 
 908 skip:
 909         seccomp_log(this_syscall, 0, action, match ? match->log : false);
 910         return -1;
 911 }
 912 #else
 913 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
 914                             const bool recheck_after_trace)
 915 {
 916         BUG();
 917 }
 918 #endif
 919 
 920 int __secure_computing(const struct seccomp_data *sd)
 921 {
 922         int mode = current->seccomp.mode;
 923         int this_syscall;
 924 
 925         if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
 926             unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
 927                 return 0;
 928 
 929         this_syscall = sd ? sd->nr :
 930                 syscall_get_nr(current, task_pt_regs(current));
 931 
 932         switch (mode) {
 933         case SECCOMP_MODE_STRICT:
 934                 __secure_computing_strict(this_syscall);  /* may call do_exit */
 935                 return 0;
 936         case SECCOMP_MODE_FILTER:
 937                 return __seccomp_filter(this_syscall, sd, false);
 938         default:
 939                 BUG();
 940         }
 941 }
 942 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
 943 
 944 long prctl_get_seccomp(void)
 945 {
 946         return current->seccomp.mode;
 947 }
 948 
 949 /**
 950  * seccomp_set_mode_strict: internal function for setting strict seccomp
 951  *
 952  * Once current->seccomp.mode is non-zero, it may not be changed.
 953  *
 954  * Returns 0 on success or -EINVAL on failure.
 955  */
 956 static long seccomp_set_mode_strict(void)
 957 {
 958         const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
 959         long ret = -EINVAL;
 960 
 961         spin_lock_irq(&current->sighand->siglock);
 962 
 963         if (!seccomp_may_assign_mode(seccomp_mode))
 964                 goto out;
 965 
 966 #ifdef TIF_NOTSC
 967         disable_TSC();
 968 #endif
 969         seccomp_assign_mode(current, seccomp_mode, 0);
 970         ret = 0;
 971 
 972 out:
 973         spin_unlock_irq(&current->sighand->siglock);
 974 
 975         return ret;
 976 }
 977 
 978 #ifdef CONFIG_SECCOMP_FILTER
 979 static int seccomp_notify_release(struct inode *inode, struct file *file)
 980 {
 981         struct seccomp_filter *filter = file->private_data;
 982         struct seccomp_knotif *knotif;
 983 
 984         if (!filter)
 985                 return 0;
 986 
 987         mutex_lock(&filter->notify_lock);
 988 
 989         /*
 990          * If this file is being closed because e.g. the task that owned it
 991          * died, let's wake everyone up who was waiting on us.
 992          */
 993         list_for_each_entry(knotif, &filter->notif->notifications, list) {
 994                 if (knotif->state == SECCOMP_NOTIFY_REPLIED)
 995                         continue;
 996 
 997                 knotif->state = SECCOMP_NOTIFY_REPLIED;
 998                 knotif->error = -ENOSYS;
 999                 knotif->val = 0;
1000 
1001                 complete(&knotif->ready);
1002         }
1003 
1004         kfree(filter->notif);
1005         filter->notif = NULL;
1006         mutex_unlock(&filter->notify_lock);
1007         __put_seccomp_filter(filter);
1008         return 0;
1009 }
1010 
1011 static long seccomp_notify_recv(struct seccomp_filter *filter,
1012                                 void __user *buf)
1013 {
1014         struct seccomp_knotif *knotif = NULL, *cur;
1015         struct seccomp_notif unotif;
1016         ssize_t ret;
1017 
1018         /* Verify that we're not given garbage to keep struct extensible. */
1019         ret = check_zeroed_user(buf, sizeof(unotif));
1020         if (ret < 0)
1021                 return ret;
1022         if (!ret)
1023                 return -EINVAL;
1024 
1025         memset(&unotif, 0, sizeof(unotif));
1026 
1027         ret = down_interruptible(&filter->notif->request);
1028         if (ret < 0)
1029                 return ret;
1030 
1031         mutex_lock(&filter->notify_lock);
1032         list_for_each_entry(cur, &filter->notif->notifications, list) {
1033                 if (cur->state == SECCOMP_NOTIFY_INIT) {
1034                         knotif = cur;
1035                         break;
1036                 }
1037         }
1038 
1039         /*
1040          * If we didn't find a notification, it could be that the task was
1041          * interrupted by a fatal signal between the time we were woken and
1042          * when we were able to acquire the notify_lock mutex.
1043          */
1044         if (!knotif) {
1045                 ret = -ENOENT;
1046                 goto out;
1047         }
1048 
1049         unotif.id = knotif->id;
1050         unotif.pid = task_pid_vnr(knotif->task);
1051         unotif.data = *(knotif->data);
1052 
1053         knotif->state = SECCOMP_NOTIFY_SENT;
1054         wake_up_poll(&filter->notif->wqh, EPOLLOUT | EPOLLWRNORM);
1055         ret = 0;
1056 out:
1057         mutex_unlock(&filter->notify_lock);
1058 
1059         if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) {
1060                 ret = -EFAULT;
1061 
1062                 /*
1063                  * Userspace screwed up. To make sure that we keep this
1064                  * notification alive, let's reset it back to INIT. It
1065                  * may have died when we released the lock, so we need to make
1066                  * sure it's still around.
1067                  */
1068                 knotif = NULL;
1069                 mutex_lock(&filter->notify_lock);
1070                 list_for_each_entry(cur, &filter->notif->notifications, list) {
1071                         if (cur->id == unotif.id) {
1072                                 knotif = cur;
1073                                 break;
1074                         }
1075                 }
1076 
1077                 if (knotif) {
1078                         knotif->state = SECCOMP_NOTIFY_INIT;
1079                         up(&filter->notif->request);
1080                 }
1081                 mutex_unlock(&filter->notify_lock);
1082         }
1083 
1084         return ret;
1085 }
1086 
1087 static long seccomp_notify_send(struct seccomp_filter *filter,
1088                                 void __user *buf)
1089 {
1090         struct seccomp_notif_resp resp = {};
1091         struct seccomp_knotif *knotif = NULL, *cur;
1092         long ret;
1093 
1094         if (copy_from_user(&resp, buf, sizeof(resp)))
1095                 return -EFAULT;
1096 
1097         if (resp.flags)
1098                 return -EINVAL;
1099 
1100         ret = mutex_lock_interruptible(&filter->notify_lock);
1101         if (ret < 0)
1102                 return ret;
1103 
1104         list_for_each_entry(cur, &filter->notif->notifications, list) {
1105                 if (cur->id == resp.id) {
1106                         knotif = cur;
1107                         break;
1108                 }
1109         }
1110 
1111         if (!knotif) {
1112                 ret = -ENOENT;
1113                 goto out;
1114         }
1115 
1116         /* Allow exactly one reply. */
1117         if (knotif->state != SECCOMP_NOTIFY_SENT) {
1118                 ret = -EINPROGRESS;
1119                 goto out;
1120         }
1121 
1122         ret = 0;
1123         knotif->state = SECCOMP_NOTIFY_REPLIED;
1124         knotif->error = resp.error;
1125         knotif->val = resp.val;
1126         complete(&knotif->ready);
1127 out:
1128         mutex_unlock(&filter->notify_lock);
1129         return ret;
1130 }
1131 
1132 static long seccomp_notify_id_valid(struct seccomp_filter *filter,
1133                                     void __user *buf)
1134 {
1135         struct seccomp_knotif *knotif = NULL;
1136         u64 id;
1137         long ret;
1138 
1139         if (copy_from_user(&id, buf, sizeof(id)))
1140                 return -EFAULT;
1141 
1142         ret = mutex_lock_interruptible(&filter->notify_lock);
1143         if (ret < 0)
1144                 return ret;
1145 
1146         ret = -ENOENT;
1147         list_for_each_entry(knotif, &filter->notif->notifications, list) {
1148                 if (knotif->id == id) {
1149                         if (knotif->state == SECCOMP_NOTIFY_SENT)
1150                                 ret = 0;
1151                         goto out;
1152                 }
1153         }
1154 
1155 out:
1156         mutex_unlock(&filter->notify_lock);
1157         return ret;
1158 }
1159 
1160 static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
1161                                  unsigned long arg)
1162 {
1163         struct seccomp_filter *filter = file->private_data;
1164         void __user *buf = (void __user *)arg;
1165 
1166         switch (cmd) {
1167         case SECCOMP_IOCTL_NOTIF_RECV:
1168                 return seccomp_notify_recv(filter, buf);
1169         case SECCOMP_IOCTL_NOTIF_SEND:
1170                 return seccomp_notify_send(filter, buf);
1171         case SECCOMP_IOCTL_NOTIF_ID_VALID:
1172                 return seccomp_notify_id_valid(filter, buf);
1173         default:
1174                 return -EINVAL;
1175         }
1176 }
1177 
1178 static __poll_t seccomp_notify_poll(struct file *file,
1179                                     struct poll_table_struct *poll_tab)
1180 {
1181         struct seccomp_filter *filter = file->private_data;
1182         __poll_t ret = 0;
1183         struct seccomp_knotif *cur;
1184 
1185         poll_wait(file, &filter->notif->wqh, poll_tab);
1186 
1187         if (mutex_lock_interruptible(&filter->notify_lock) < 0)
1188                 return EPOLLERR;
1189 
1190         list_for_each_entry(cur, &filter->notif->notifications, list) {
1191                 if (cur->state == SECCOMP_NOTIFY_INIT)
1192                         ret |= EPOLLIN | EPOLLRDNORM;
1193                 if (cur->state == SECCOMP_NOTIFY_SENT)
1194                         ret |= EPOLLOUT | EPOLLWRNORM;
1195                 if ((ret & EPOLLIN) && (ret & EPOLLOUT))
1196                         break;
1197         }
1198 
1199         mutex_unlock(&filter->notify_lock);
1200 
1201         return ret;
1202 }
1203 
1204 static const struct file_operations seccomp_notify_ops = {
1205         .poll = seccomp_notify_poll,
1206         .release = seccomp_notify_release,
1207         .unlocked_ioctl = seccomp_notify_ioctl,
1208         .compat_ioctl = seccomp_notify_ioctl,
1209 };
1210 
1211 static struct file *init_listener(struct seccomp_filter *filter)
1212 {
1213         struct file *ret = ERR_PTR(-EBUSY);
1214         struct seccomp_filter *cur;
1215 
1216         for (cur = current->seccomp.filter; cur; cur = cur->prev) {
1217                 if (cur->notif)
1218                         goto out;
1219         }
1220 
1221         ret = ERR_PTR(-ENOMEM);
1222         filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
1223         if (!filter->notif)
1224                 goto out;
1225 
1226         sema_init(&filter->notif->request, 0);
1227         filter->notif->next_id = get_random_u64();
1228         INIT_LIST_HEAD(&filter->notif->notifications);
1229         init_waitqueue_head(&filter->notif->wqh);
1230 
1231         ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
1232                                  filter, O_RDWR);
1233         if (IS_ERR(ret))
1234                 goto out_notif;
1235 
1236         /* The file has a reference to it now */
1237         __get_seccomp_filter(filter);
1238 
1239 out_notif:
1240         if (IS_ERR(ret))
1241                 kfree(filter->notif);
1242 out:
1243         return ret;
1244 }
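
/*
 * Editor's note: a sketch of how userspace obtains and waits on the
 * listener created here (see seccomp_notify_poll() above for the poll
 * semantics):
 *
 *	int fd = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *			 SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// POLLIN: a notification is waiting in INIT
 */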
1245 
1246 /**
1247  * seccomp_set_mode_filter: internal function for setting seccomp filter
1248  * @flags:  flags to change filter behavior
1249  * @filter: struct sock_fprog containing filter
1250  *
1251  * This function may be called repeatedly to install additional filters.
1252  * Every filter successfully installed will be evaluated (in reverse order)
1253  * for each system call the task makes.
1254  *
1255  * Once current->seccomp.mode is non-zero, it may not be changed.
1256  *
1257  * Returns 0 on success or -EINVAL on failure.
1258  */
1259 static long seccomp_set_mode_filter(unsigned int flags,
1260                                     const char __user *filter)
1261 {
1262         const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
1263         struct seccomp_filter *prepared = NULL;
1264         long ret = -EINVAL;
1265         int listener = -1;
1266         struct file *listener_f = NULL;
1267 
1268         /* Validate flags. */
1269         if (flags & ~SECCOMP_FILTER_FLAG_MASK)
1270                 return -EINVAL;
1271 
1272         /*
1273          * In the successful case, NEW_LISTENER returns the new listener fd.
1274          * But in the failure case, TSYNC returns the thread that died. If you
1275          * combine these two flags, there's no way to tell whether something
1276          * succeeded or failed. So, let's disallow this combination.
1277          */
1278         if ((flags & SECCOMP_FILTER_FLAG_TSYNC) &&
1279             (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER))
1280                 return -EINVAL;
1281 
1282         /* Prepare the new filter before holding any locks. */
1283         prepared = seccomp_prepare_user_filter(filter);
1284         if (IS_ERR(prepared))
1285                 return PTR_ERR(prepared);
1286 
1287         if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1288                 listener = get_unused_fd_flags(O_CLOEXEC);
1289                 if (listener < 0) {
1290                         ret = listener;
1291                         goto out_free;
1292                 }
1293 
1294                 listener_f = init_listener(prepared);
1295                 if (IS_ERR(listener_f)) {
1296                         put_unused_fd(listener);
1297                         ret = PTR_ERR(listener_f);
1298                         goto out_free;
1299                 }
1300         }
1301 
1302         /*
1303          * Make sure we cannot change seccomp or nnp state via TSYNC
1304          * while another thread is in the middle of calling exec.
1305          */
1306         if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
1307             mutex_lock_killable(&current->signal->cred_guard_mutex))
1308                 goto out_put_fd;
1309 
1310         spin_lock_irq(&current->sighand->siglock);
1311 
1312         if (!seccomp_may_assign_mode(seccomp_mode))
1313                 goto out;
1314 
1315         ret = seccomp_attach_filter(flags, prepared);
1316         if (ret)
1317                 goto out;
1318         /* Do not free the successfully attached filter. */
1319         prepared = NULL;
1320 
1321         seccomp_assign_mode(current, seccomp_mode, flags);
1322 out:
1323         spin_unlock_irq(&current->sighand->siglock);
1324         if (flags & SECCOMP_FILTER_FLAG_TSYNC)
1325                 mutex_unlock(&current->signal->cred_guard_mutex);
1326 out_put_fd:
1327         if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1328                 if (ret) {
1329                         listener_f->private_data = NULL;
1330                         fput(listener_f);
1331                         put_unused_fd(listener);
1332                 } else {
1333                         fd_install(listener, listener_f);
1334                         ret = listener;
1335                 }
1336         }
1337 out_free:
1338         seccomp_filter_free(prepared);
1339         return ret;
1340 }
1341 #else
1342 static inline long seccomp_set_mode_filter(unsigned int flags,
1343                                            const char __user *filter)
1344 {
1345         return -EINVAL;
1346 }
1347 #endif
1348 
1349 static long seccomp_get_action_avail(const char __user *uaction)
1350 {
1351         u32 action;
1352 
1353         if (copy_from_user(&action, uaction, sizeof(action)))
1354                 return -EFAULT;
1355 
1356         switch (action) {
1357         case SECCOMP_RET_KILL_PROCESS:
1358         case SECCOMP_RET_KILL_THREAD:
1359         case SECCOMP_RET_TRAP:
1360         case SECCOMP_RET_ERRNO:
1361         case SECCOMP_RET_USER_NOTIF:
1362         case SECCOMP_RET_TRACE:
1363         case SECCOMP_RET_LOG:
1364         case SECCOMP_RET_ALLOW:
1365                 break;
1366         default:
1367                 return -EOPNOTSUPP;
1368         }
1369 
1370         return 0;
1371 }
1372 
1373 static long seccomp_get_notif_sizes(void __user *usizes)
1374 {
1375         struct seccomp_notif_sizes sizes = {
1376                 .seccomp_notif = sizeof(struct seccomp_notif),
1377                 .seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
1378                 .seccomp_data = sizeof(struct seccomp_data),
1379         };
1380 
1381         if (copy_to_user(usizes, &sizes, sizeof(sizes)))
1382                 return -EFAULT;
1383 
1384         return 0;
1385 }
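
/*
 * Editor's note: userspace is expected to query these sizes before using
 * the notification API so the structs can grow without breaking older
 * binaries.  A sketch:
 *
 *	struct seccomp_notif_sizes sizes;
 *	syscall(__NR_seccomp, SECCOMP_GET_NOTIF_SIZES, 0, &sizes);
 *	// allocate sizes.seccomp_notif / sizes.seccomp_notif_resp bytes
 */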
1386 
1387 /* Common entry point for both prctl and syscall. */
1388 static long do_seccomp(unsigned int op, unsigned int flags,
1389                        void __user *uargs)
1390 {
1391         switch (op) {
1392         case SECCOMP_SET_MODE_STRICT:
1393                 if (flags != 0 || uargs != NULL)
1394                         return -EINVAL;
1395                 return seccomp_set_mode_strict();
1396         case SECCOMP_SET_MODE_FILTER:
1397                 return seccomp_set_mode_filter(flags, uargs);
1398         case SECCOMP_GET_ACTION_AVAIL:
1399                 if (flags != 0)
1400                         return -EINVAL;
1401 
1402                 return seccomp_get_action_avail(uargs);
1403         case SECCOMP_GET_NOTIF_SIZES:
1404                 if (flags != 0)
1405                         return -EINVAL;
1406 
1407                 return seccomp_get_notif_sizes(uargs);
1408         default:
1409                 return -EINVAL;
1410         }
1411 }
1412 
1413 SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
1414                          void __user *, uargs)
1415 {
1416         return do_seccomp(op, flags, uargs);
1417 }
1418 
1419 /**
1420  * prctl_set_seccomp: configures current->seccomp.mode
1421  * @seccomp_mode: requested mode to use
1422  * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
1423  *
1424  * Returns 0 on success or -EINVAL on failure.
1425  */
1426 long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
1427 {
1428         unsigned int op;
1429         void __user *uargs;
1430 
1431         switch (seccomp_mode) {
1432         case SECCOMP_MODE_STRICT:
1433                 op = SECCOMP_SET_MODE_STRICT;
1434                 /*
1435                  * Setting strict mode through prctl always ignores the filter,
1436                  * so make sure it is always NULL here to pass the internal
1437                  * check in do_seccomp().
1438                  */
1439                 uargs = NULL;
1440                 break;
1441         case SECCOMP_MODE_FILTER:
1442                 op = SECCOMP_SET_MODE_FILTER;
1443                 uargs = filter;
1444                 break;
1445         default:
1446                 return -EINVAL;
1447         }
1448 
1449         /* prctl interface doesn't have flags, so they are always zero. */
1450         return do_seccomp(op, 0, uargs);
1451 }
1452 
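/*
 * Example (userspace sketch): the two interfaces are equivalent when no
 * flags are needed, i.e.
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *
 * behaves exactly like
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 */
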
1453 #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
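/*
 * filter_off indexes the task's filter chain from the oldest entry: an
 * offset of 0 names the first filter attached, and the most recently
 * attached filter has the highest valid offset (count - 1).
 */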
1454 static struct seccomp_filter *get_nth_filter(struct task_struct *task,
1455                                              unsigned long filter_off)
1456 {
1457         struct seccomp_filter *orig, *filter;
1458         unsigned long count;
1459 
1460         /*
1461          * Note: this is only safe because the caller is expected to be the
1462          * (ptrace) tracer of the task; otherwise lock_task_sighand() is needed.
1463          */
1464         spin_lock_irq(&task->sighand->siglock);
1465 
1466         if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
1467                 spin_unlock_irq(&task->sighand->siglock);
1468                 return ERR_PTR(-EINVAL);
1469         }
1470 
1471         orig = task->seccomp.filter;
1472         __get_seccomp_filter(orig);
1473         spin_unlock_irq(&task->sighand->siglock);
1474 
1475         count = 0;
1476         for (filter = orig; filter; filter = filter->prev)
1477                 count++;
1478 
1479         if (filter_off >= count) {
1480                 filter = ERR_PTR(-ENOENT);
1481                 goto out;
1482         }
1483 
1484         count -= filter_off;
1485         for (filter = orig; filter && count > 1; filter = filter->prev)
1486                 count--;
1487 
1488         if (WARN_ON(count != 1 || !filter)) {
1489                 filter = ERR_PTR(-ENOENT);
1490                 goto out;
1491         }
1492 
1493         __get_seccomp_filter(filter);
1494 
1495 out:
1496         __put_seccomp_filter(orig);
1497         return filter;
1498 }
1499 
1500 long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1501                         void __user *data)
1502 {
1503         struct seccomp_filter *filter;
1504         struct sock_fprog_kern *fprog;
1505         long ret;
1506 
1507         if (!capable(CAP_SYS_ADMIN) ||
1508             current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1509                 return -EACCES;
1510         }
1511 
1512         filter = get_nth_filter(task, filter_off);
1513         if (IS_ERR(filter))
1514                 return PTR_ERR(filter);
1515 
1516         fprog = filter->prog->orig_prog;
1517         if (!fprog) {
1518         /* This must be a new non-cBPF filter, since we save
1519          * every cBPF filter's orig_prog in seccomp_prepare_filter()
1520          * when CONFIG_CHECKPOINT_RESTORE is enabled.
1521          */
1522                 ret = -EMEDIUMTYPE;
1523                 goto out;
1524         }
1525 
1526         ret = fprog->len;
1527         if (!data)
1528                 goto out;
1529 
1530         if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1531                 ret = -EFAULT;
1532 
1533 out:
1534         __put_seccomp_filter(filter);
1535         return ret;
1536 }
1537 
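/*
 * Example (userspace sketch): a checkpointing tool dumps a tracee's
 * classic-BPF program by calling the ptrace operation twice, first with a
 * NULL buffer to learn the instruction count (eBPF-backed filters report
 * EMEDIUMTYPE instead); the tracee must already be in ptrace-stop:
 *
 *	long cnt = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
 *	struct sock_filter *insns;
 *
 *	if (cnt < 0)
 *		err(1, "PTRACE_SECCOMP_GET_FILTER");
 *	insns = calloc(cnt, sizeof(*insns));
 *	if (ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns) < 0)
 *		err(1, "PTRACE_SECCOMP_GET_FILTER");
 */
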
1538 long seccomp_get_metadata(struct task_struct *task,
1539                           unsigned long size, void __user *data)
1540 {
1541         long ret;
1542         struct seccomp_filter *filter;
1543         struct seccomp_metadata kmd = {};
1544 
1545         if (!capable(CAP_SYS_ADMIN) ||
1546             current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1547                 return -EACCES;
1548         }
1549 
1550         size = min_t(unsigned long, size, sizeof(kmd));
1551 
1552         if (size < sizeof(kmd.filter_off))
1553                 return -EINVAL;
1554 
1555         if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
1556                 return -EFAULT;
1557 
1558         filter = get_nth_filter(task, kmd.filter_off);
1559         if (IS_ERR(filter))
1560                 return PTR_ERR(filter);
1561 
1562         if (filter->log)
1563                 kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
1564 
1565         ret = size;
1566         if (copy_to_user(data, &kmd, size))
1567                 ret = -EFAULT;
1568 
1569         __put_seccomp_filter(filter);
1570         return ret;
1571 }
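
/*
 * Example (userspace sketch): the metadata operation takes the filter
 * index in seccomp_metadata.filter_off and reports the attach-time flags
 * back through the same structure:
 *
 *	struct seccomp_metadata md = { .filter_off = 0 };
 *
 *	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) < 0)
 *		err(1, "PTRACE_SECCOMP_GET_METADATA");
 *	if (md.flags & SECCOMP_FILTER_FLAG_LOG)
 *		puts("filter 0 was attached with FLAG_LOG");
 */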
1572 #endif
1573 
1574 #ifdef CONFIG_SYSCTL
1575 
1576 /* Human-readable action names for friendly sysctl interaction */
1577 #define SECCOMP_RET_KILL_PROCESS_NAME   "kill_process"
1578 #define SECCOMP_RET_KILL_THREAD_NAME    "kill_thread"
1579 #define SECCOMP_RET_TRAP_NAME           "trap"
1580 #define SECCOMP_RET_ERRNO_NAME          "errno"
1581 #define SECCOMP_RET_USER_NOTIF_NAME     "user_notif"
1582 #define SECCOMP_RET_TRACE_NAME          "trace"
1583 #define SECCOMP_RET_LOG_NAME            "log"
1584 #define SECCOMP_RET_ALLOW_NAME          "allow"
1585 
1586 static const char seccomp_actions_avail[] =
1587                                 SECCOMP_RET_KILL_PROCESS_NAME   " "
1588                                 SECCOMP_RET_KILL_THREAD_NAME    " "
1589                                 SECCOMP_RET_TRAP_NAME           " "
1590                                 SECCOMP_RET_ERRNO_NAME          " "
1591                                 SECCOMP_RET_USER_NOTIF_NAME     " "
1592                                 SECCOMP_RET_TRACE_NAME          " "
1593                                 SECCOMP_RET_LOG_NAME            " "
1594                                 SECCOMP_RET_ALLOW_NAME;
1595 
1596 struct seccomp_log_name {
1597         u32             log;
1598         const char      *name;
1599 };
1600 
1601 static const struct seccomp_log_name seccomp_log_names[] = {
1602         { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
1603         { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
1604         { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
1605         { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
1606         { SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
1607         { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
1608         { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
1609         { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
1610         { }
1611 };
1612 
1613 static bool seccomp_names_from_actions_logged(char *names, size_t size,
1614                                               u32 actions_logged,
1615                                               const char *sep)
1616 {
1617         const struct seccomp_log_name *cur;
1618         bool append_sep = false;
1619 
1620         for (cur = seccomp_log_names; cur->name && size; cur++) {
1621                 ssize_t ret;
1622 
1623                 if (!(actions_logged & cur->log))
1624                         continue;
1625 
1626                 if (append_sep) {
1627                         ret = strscpy(names, sep, size);
1628                         if (ret < 0)
1629                                 return false;
1630 
1631                         names += ret;
1632                         size -= ret;
1633                 } else
1634                         append_sep = true;
1635 
1636                 ret = strscpy(names, cur->name, size);
1637                 if (ret < 0)
1638                         return false;
1639 
1640                 names += ret;
1641                 size -= ret;
1642         }
1643 
1644         return true;
1645 }
1646 
1647 static bool seccomp_action_logged_from_name(u32 *action_logged,
1648                                             const char *name)
1649 {
1650         const struct seccomp_log_name *cur;
1651 
1652         for (cur = seccomp_log_names; cur->name; cur++) {
1653                 if (!strcmp(cur->name, name)) {
1654                         *action_logged = cur->log;
1655                         return true;
1656                 }
1657         }
1658 
1659         return false;
1660 }
1661 
1662 static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
1663 {
1664         char *name;
1665 
1666         *actions_logged = 0;
1667         while ((name = strsep(&names, " ")) && *name) {
1668                 u32 action_logged = 0;
1669 
1670                 if (!seccomp_action_logged_from_name(&action_logged, name))
1671                         return false;
1672 
1673                 *actions_logged |= action_logged;
1674         }
1675 
1676         return true;
1677 }
1678 
1679 static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1680                                size_t *lenp, loff_t *ppos)
1681 {
1682         char names[sizeof(seccomp_actions_avail)];
1683         struct ctl_table table;
1684 
1685         memset(names, 0, sizeof(names));
1686 
1687         if (!seccomp_names_from_actions_logged(names, sizeof(names),
1688                                                seccomp_actions_logged, " "))
1689                 return -EINVAL;
1690 
1691         table = *ro_table;
1692         table.data = names;
1693         table.maxlen = sizeof(names);
1694         return proc_dostring(&table, 0, buffer, lenp, ppos);
1695 }
1696 
1697 static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1698                                 size_t *lenp, loff_t *ppos, u32 *actions_logged)
1699 {
1700         char names[sizeof(seccomp_actions_avail)];
1701         struct ctl_table table;
1702         int ret;
1703 
1704         if (!capable(CAP_SYS_ADMIN))
1705                 return -EPERM;
1706 
1707         memset(names, 0, sizeof(names));
1708 
1709         table = *ro_table;
1710         table.data = names;
1711         table.maxlen = sizeof(names);
1712         ret = proc_dostring(&table, 1, buffer, lenp, ppos);
1713         if (ret)
1714                 return ret;
1715 
1716         if (!seccomp_actions_logged_from_names(actions_logged, table.data))
1717                 return -EINVAL;
1718 
1719         if (*actions_logged & SECCOMP_LOG_ALLOW)
1720                 return -EINVAL;
1721 
1722         seccomp_actions_logged = *actions_logged;
1723         return 0;
1724 }
1725 
1726 static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
1727                                  int ret)
1728 {
1729         char names[sizeof(seccomp_actions_avail)];
1730         char old_names[sizeof(seccomp_actions_avail)];
1731         const char *new = names;
1732         const char *old = old_names;
1733 
1734         if (!audit_enabled)
1735                 return;
1736 
1737         memset(names, 0, sizeof(names));
1738         memset(old_names, 0, sizeof(old_names));
1739 
1740         if (ret)
1741                 new = "?";
1742         else if (!actions_logged)
1743                 new = "(none)";
1744         else if (!seccomp_names_from_actions_logged(names, sizeof(names),
1745                                                     actions_logged, ","))
1746                 new = "?";
1747 
1748         if (!old_actions_logged)
1749                 old = "(none)";
1750         else if (!seccomp_names_from_actions_logged(old_names,
1751                                                     sizeof(old_names),
1752                                                     old_actions_logged, ","))
1753                 old = "?";
1754 
1755         return audit_seccomp_actions_logged(new, old, !ret);
1756 }
1757 
1758 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
1759                                           void __user *buffer, size_t *lenp,
1760                                           loff_t *ppos)
1761 {
1762         int ret;
1763 
1764         if (write) {
1765                 u32 actions_logged = 0;
1766                 u32 old_actions_logged = seccomp_actions_logged;
1767 
1768                 ret = write_actions_logged(ro_table, buffer, lenp, ppos,
1769                                            &actions_logged);
1770                 audit_actions_logged(actions_logged, old_actions_logged, ret);
1771         } else
1772                 ret = read_actions_logged(ro_table, buffer, lenp, ppos);
1773 
1774         return ret;
1775 }
1776 
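/*
 * The handler above backs /proc/sys/kernel/seccomp/actions_logged (see
 * the sysctl path and table below).  A sketch of the interface, assuming
 * the default set, which logs every action except allow; note that
 * "allow" is rejected on write by write_actions_logged():
 *
 *	# cat /proc/sys/kernel/seccomp/actions_logged
 *	kill_process kill_thread trap errno user_notif trace log
 *
 *	# echo "errno trace" > /proc/sys/kernel/seccomp/actions_logged
 */
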
1777 static struct ctl_path seccomp_sysctl_path[] = {
1778         { .procname = "kernel", },
1779         { .procname = "seccomp", },
1780         { }
1781 };
1782 
1783 static struct ctl_table seccomp_sysctl_table[] = {
1784         {
1785                 .procname       = "actions_avail",
1786                 .data           = (void *) &seccomp_actions_avail,
1787                 .maxlen         = sizeof(seccomp_actions_avail),
1788                 .mode           = 0444,
1789                 .proc_handler   = proc_dostring,
1790         },
1791         {
1792                 .procname       = "actions_logged",
1793                 .mode           = 0644,
1794                 .proc_handler   = seccomp_actions_logged_handler,
1795         },
1796         { }
1797 };
1798 
1799 static int __init seccomp_sysctl_init(void)
1800 {
1801         struct ctl_table_header *hdr;
1802 
1803         hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
1804         if (!hdr)
1805                 pr_warn("seccomp: sysctl registration failed\n");
1806         else
1807                 kmemleak_not_leak(hdr);
1808 
1809         return 0;
1810 }
1811 
1812 device_initcall(seccomp_sysctl_init)
1813 
1814 #endif /* CONFIG_SYSCTL */
