fs/exec.c


DEFINITIONS

This source file includes the following definitions.
  1. __register_binfmt
  2. unregister_binfmt
  3. put_binfmt
  4. path_noexec
  5. SYSCALL_DEFINE1
  6. acct_arg_size
  7. get_arg_page
  8. put_arg_page
  9. free_arg_pages
  10. flush_arg_page
  11. __bprm_mm_init
  12. valid_arg_len
  13. acct_arg_size
  14. get_arg_page
  15. put_arg_page
  16. free_arg_page
  17. free_arg_pages
  18. flush_arg_page
  19. __bprm_mm_init
  20. valid_arg_len
  21. bprm_mm_init
  22. get_user_arg_ptr
  23. count
  24. prepare_arg_pages
  25. copy_strings
  26. copy_strings_kernel
  27. shift_arg_pages
  28. setup_arg_pages
  29. transfer_args_to_stack
  30. do_open_execat
  31. open_exec
  32. kernel_read_file
  33. kernel_read_file_from_path
  34. kernel_read_file_from_fd
  35. read_code
  36. exec_mmap
  37. de_thread
  38. __get_task_comm
  39. __set_task_comm
  40. flush_old_exec
  41. would_dump
  42. setup_new_exec
  43. finalize_exec
  44. prepare_bprm_creds
  45. free_bprm
  46. bprm_change_interp
  47. install_exec_creds
  48. check_unsafe_exec
  49. bprm_fill_uid
  50. prepare_binprm
  51. remove_arg_zero
  52. search_binary_handler
  53. exec_binprm
  54. __do_execve_file
  55. do_execveat_common
  56. do_execve_file
  57. do_execve
  58. do_execveat
  59. compat_do_execve
  60. compat_do_execveat
  61. set_binfmt
  62. set_dumpable
  63. SYSCALL_DEFINE3
  64. SYSCALL_DEFINE5
  65. COMPAT_SYSCALL_DEFINE3
  66. COMPAT_SYSCALL_DEFINE5

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *  linux/fs/exec.c
   4  *
   5  *  Copyright (C) 1991, 1992  Linus Torvalds
   6  */
   7 
   8 /*
   9  * #!-checking implemented by tytso.
  10  */
  11 /*
  12  * Demand-loading implemented 01.12.91 - no need to read anything but
  13  * the header into memory. The inode of the executable is put into
  14  * "current->executable", and page faults do the actual loading. Clean.
  15  *
  16  * Once more I can proudly say that linux stood up to being changed: it
  17  * was less than 2 hours work to get demand-loading completely implemented.
  18  *
  19  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
  20  * current->executable is only used by the procfs.  This allows a dispatch
  21  * table to check for several different types  of binary formats.  We keep
  22  * trying until we recognize the file or we run out of supported binary
  23  * formats.
  24  */
  25 
  26 #include <linux/slab.h>
  27 #include <linux/file.h>
  28 #include <linux/fdtable.h>
  29 #include <linux/mm.h>
  30 #include <linux/vmacache.h>
  31 #include <linux/stat.h>
  32 #include <linux/fcntl.h>
  33 #include <linux/swap.h>
  34 #include <linux/string.h>
  35 #include <linux/init.h>
  36 #include <linux/sched/mm.h>
  37 #include <linux/sched/coredump.h>
  38 #include <linux/sched/signal.h>
  39 #include <linux/sched/numa_balancing.h>
  40 #include <linux/sched/task.h>
  41 #include <linux/pagemap.h>
  42 #include <linux/perf_event.h>
  43 #include <linux/highmem.h>
  44 #include <linux/spinlock.h>
  45 #include <linux/key.h>
  46 #include <linux/personality.h>
  47 #include <linux/binfmts.h>
  48 #include <linux/utsname.h>
  49 #include <linux/pid_namespace.h>
  50 #include <linux/module.h>
  51 #include <linux/namei.h>
  52 #include <linux/mount.h>
  53 #include <linux/security.h>
  54 #include <linux/syscalls.h>
  55 #include <linux/tsacct_kern.h>
  56 #include <linux/cn_proc.h>
  57 #include <linux/audit.h>
  58 #include <linux/tracehook.h>
  59 #include <linux/kmod.h>
  60 #include <linux/fsnotify.h>
  61 #include <linux/fs_struct.h>
  62 #include <linux/pipe_fs_i.h>
  63 #include <linux/oom.h>
  64 #include <linux/compat.h>
  65 #include <linux/vmalloc.h>
  66 
  67 #include <linux/uaccess.h>
  68 #include <asm/mmu_context.h>
  69 #include <asm/tlb.h>
  70 
  71 #include <trace/events/task.h>
  72 #include "internal.h"
  73 
  74 #include <trace/events/sched.h>
  75 
  76 int suid_dumpable = 0;
  77 
  78 static LIST_HEAD(formats);
  79 static DEFINE_RWLOCK(binfmt_lock);
  80 
  81 void __register_binfmt(struct linux_binfmt * fmt, int insert)
  82 {
  83         BUG_ON(!fmt);
  84         if (WARN_ON(!fmt->load_binary))
  85                 return;
  86         write_lock(&binfmt_lock);
  87         insert ? list_add(&fmt->lh, &formats) :
  88                  list_add_tail(&fmt->lh, &formats);
  89         write_unlock(&binfmt_lock);
  90 }
  91 
  92 EXPORT_SYMBOL(__register_binfmt);
  93 
  94 void unregister_binfmt(struct linux_binfmt * fmt)
  95 {
  96         write_lock(&binfmt_lock);
  97         list_del(&fmt->lh);
  98         write_unlock(&binfmt_lock);
  99 }
 100 
 101 EXPORT_SYMBOL(unregister_binfmt);
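
/*
 * Illustrative sketch (not part of fs/exec.c): a minimal module that
 * registers a binary format on the list above through the
 * register_binfmt()/unregister_binfmt() wrappers from <linux/binfmts.h>.
 * All "example_*" names are made up; the loader is a stub that declines
 * every image.
 */

#include <linux/binfmts.h>
#include <linux/module.h>

static int example_load_binary(struct linux_binprm *bprm)
{
        /* Not our format: let search_binary_handler() try the next one. */
        return -ENOEXEC;
}

static struct linux_binfmt example_format = {
        .module      = THIS_MODULE,
        .load_binary = example_load_binary,
};

static int __init example_binfmt_init(void)
{
        register_binfmt(&example_format);  /* tail-adds via __register_binfmt(fmt, 0) */
        return 0;
}

static void __exit example_binfmt_exit(void)
{
        unregister_binfmt(&example_format);
}

module_init(example_binfmt_init);
module_exit(example_binfmt_exit);
MODULE_LICENSE("GPL");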
 102 
 103 static inline void put_binfmt(struct linux_binfmt * fmt)
 104 {
 105         module_put(fmt->module);
 106 }
 107 
 108 bool path_noexec(const struct path *path)
 109 {
 110         return (path->mnt->mnt_flags & MNT_NOEXEC) ||
 111                (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
 112 }
 113 
 114 #ifdef CONFIG_USELIB
 115 /*
 116  * Note that a shared library must be both readable and executable
 117  * for security reasons.
 118  *
 119  * Also note that we take the address to load from the file itself.
 120  */
 121 SYSCALL_DEFINE1(uselib, const char __user *, library)
 122 {
 123         struct linux_binfmt *fmt;
 124         struct file *file;
 125         struct filename *tmp = getname(library);
 126         int error = PTR_ERR(tmp);
 127         static const struct open_flags uselib_flags = {
 128                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 129                 .acc_mode = MAY_READ | MAY_EXEC,
 130                 .intent = LOOKUP_OPEN,
 131                 .lookup_flags = LOOKUP_FOLLOW,
 132         };
 133 
 134         if (IS_ERR(tmp))
 135                 goto out;
 136 
 137         file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
 138         putname(tmp);
 139         error = PTR_ERR(file);
 140         if (IS_ERR(file))
 141                 goto out;
 142 
 143         error = -EINVAL;
 144         if (!S_ISREG(file_inode(file)->i_mode))
 145                 goto exit;
 146 
 147         error = -EACCES;
 148         if (path_noexec(&file->f_path))
 149                 goto exit;
 150 
 151         fsnotify_open(file);
 152 
 153         error = -ENOEXEC;
 154 
 155         read_lock(&binfmt_lock);
 156         list_for_each_entry(fmt, &formats, lh) {
 157                 if (!fmt->load_shlib)
 158                         continue;
 159                 if (!try_module_get(fmt->module))
 160                         continue;
 161                 read_unlock(&binfmt_lock);
 162                 error = fmt->load_shlib(file);
 163                 read_lock(&binfmt_lock);
 164                 put_binfmt(fmt);
 165                 if (error != -ENOEXEC)
 166                         break;
 167         }
 168         read_unlock(&binfmt_lock);
 169 exit:
 170         fput(file);
 171 out:
 172         return error;
 173 }
 174 #endif /* #ifdef CONFIG_USELIB */
 175 
 176 #ifdef CONFIG_MMU
 177 /*
 178  * The nascent bprm->mm is not visible until exec_mmap() but it can
 179  * use a lot of memory, account these pages in current->mm temporarily
 180  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 181  * change the counter back via acct_arg_size(0).
 182  */
 183 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 184 {
 185         struct mm_struct *mm = current->mm;
 186         long diff = (long)(pages - bprm->vma_pages);
 187 
 188         if (!mm || !diff)
 189                 return;
 190 
 191         bprm->vma_pages = pages;
 192         add_mm_counter(mm, MM_ANONPAGES, diff);
 193 }
 194 
 195 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 196                 int write)
 197 {
 198         struct page *page;
 199         int ret;
 200         unsigned int gup_flags = FOLL_FORCE;
 201 
 202 #ifdef CONFIG_STACK_GROWSUP
 203         if (write) {
 204                 ret = expand_downwards(bprm->vma, pos);
 205                 if (ret < 0)
 206                         return NULL;
 207         }
 208 #endif
 209 
 210         if (write)
 211                 gup_flags |= FOLL_WRITE;
 212 
 213         /*
 214          * We are doing an exec().  'current' is the process
 215          * doing the exec and bprm->mm is the new process's mm.
 216          */
 217         ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
 218                         &page, NULL, NULL);
 219         if (ret <= 0)
 220                 return NULL;
 221 
 222         if (write)
 223                 acct_arg_size(bprm, vma_pages(bprm->vma));
 224 
 225         return page;
 226 }
 227 
 228 static void put_arg_page(struct page *page)
 229 {
 230         put_page(page);
 231 }
 232 
 233 static void free_arg_pages(struct linux_binprm *bprm)
 234 {
 235 }
 236 
 237 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 238                 struct page *page)
 239 {
 240         flush_cache_page(bprm->vma, pos, page_to_pfn(page));
 241 }
 242 
 243 static int __bprm_mm_init(struct linux_binprm *bprm)
 244 {
 245         int err;
 246         struct vm_area_struct *vma = NULL;
 247         struct mm_struct *mm = bprm->mm;
 248 
 249         bprm->vma = vma = vm_area_alloc(mm);
 250         if (!vma)
 251                 return -ENOMEM;
 252         vma_set_anonymous(vma);
 253 
 254         if (down_write_killable(&mm->mmap_sem)) {
 255                 err = -EINTR;
 256                 goto err_free;
 257         }
 258 
 259         /*
 260          * Place the stack at the largest stack address the architecture
 261          * supports. Later, we'll move this to an appropriate place. We don't
 262          * use STACK_TOP because that can depend on attributes which aren't
 263          * configured yet.
 264          */
 265         BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
 266         vma->vm_end = STACK_TOP_MAX;
 267         vma->vm_start = vma->vm_end - PAGE_SIZE;
 268         vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
 269         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 270 
 271         err = insert_vm_struct(mm, vma);
 272         if (err)
 273                 goto err;
 274 
 275         mm->stack_vm = mm->total_vm = 1;
 276         arch_bprm_mm_init(mm, vma);
 277         up_write(&mm->mmap_sem);
 278         bprm->p = vma->vm_end - sizeof(void *);
 279         return 0;
 280 err:
 281         up_write(&mm->mmap_sem);
 282 err_free:
 283         bprm->vma = NULL;
 284         vm_area_free(vma);
 285         return err;
 286 }
 287 
 288 static bool valid_arg_len(struct linux_binprm *bprm, long len)
 289 {
 290         return len <= MAX_ARG_STRLEN;
 291 }
 292 
 293 #else
 294 
 295 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
 296 {
 297 }
 298 
 299 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 300                 int write)
 301 {
 302         struct page *page;
 303 
 304         page = bprm->page[pos / PAGE_SIZE];
 305         if (!page && write) {
 306                 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
 307                 if (!page)
 308                         return NULL;
 309                 bprm->page[pos / PAGE_SIZE] = page;
 310         }
 311 
 312         return page;
 313 }
 314 
 315 static void put_arg_page(struct page *page)
 316 {
 317 }
 318 
 319 static void free_arg_page(struct linux_binprm *bprm, int i)
 320 {
 321         if (bprm->page[i]) {
 322                 __free_page(bprm->page[i]);
 323                 bprm->page[i] = NULL;
 324         }
 325 }
 326 
 327 static void free_arg_pages(struct linux_binprm *bprm)
 328 {
 329         int i;
 330 
 331         for (i = 0; i < MAX_ARG_PAGES; i++)
 332                 free_arg_page(bprm, i);
 333 }
 334 
 335 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
 336                 struct page *page)
 337 {
 338 }
 339 
 340 static int __bprm_mm_init(struct linux_binprm *bprm)
 341 {
 342         bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
 343         return 0;
 344 }
 345 
 346 static bool valid_arg_len(struct linux_binprm *bprm, long len)
 347 {
 348         return len <= bprm->p;
 349 }
 350 
 351 #endif /* CONFIG_MMU */
 352 
 353 /*
 354  * Create a new mm_struct and populate it with a temporary stack
 355  * vm_area_struct.  We don't have enough context at this point to set the stack
 356  * flags, permissions, and offset, so we use temporary values.  We'll update
 357  * them later in setup_arg_pages().
 358  */
 359 static int bprm_mm_init(struct linux_binprm *bprm)
 360 {
 361         int err;
 362         struct mm_struct *mm = NULL;
 363 
 364         bprm->mm = mm = mm_alloc();
 365         err = -ENOMEM;
 366         if (!mm)
 367                 goto err;
 368 
 369         /* Save current stack limit for all calculations made during exec. */
 370         task_lock(current->group_leader);
 371         bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
 372         task_unlock(current->group_leader);
 373 
 374         err = __bprm_mm_init(bprm);
 375         if (err)
 376                 goto err;
 377 
 378         return 0;
 379 
 380 err:
 381         if (mm) {
 382                 bprm->mm = NULL;
 383                 mmdrop(mm);
 384         }
 385 
 386         return err;
 387 }
 388 
 389 struct user_arg_ptr {
 390 #ifdef CONFIG_COMPAT
 391         bool is_compat;
 392 #endif
 393         union {
 394                 const char __user *const __user *native;
 395 #ifdef CONFIG_COMPAT
 396                 const compat_uptr_t __user *compat;
 397 #endif
 398         } ptr;
 399 };
 400 
 401 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
 402 {
 403         const char __user *native;
 404 
 405 #ifdef CONFIG_COMPAT
 406         if (unlikely(argv.is_compat)) {
 407                 compat_uptr_t compat;
 408 
 409                 if (get_user(compat, argv.ptr.compat + nr))
 410                         return ERR_PTR(-EFAULT);
 411 
 412                 return compat_ptr(compat);
 413         }
 414 #endif
 415 
 416         if (get_user(native, argv.ptr.native + nr))
 417                 return ERR_PTR(-EFAULT);
 418 
 419         return native;
 420 }
 421 
 422 /*
 423  * count() counts the number of strings in array ARGV.
 424  */
 425 static int count(struct user_arg_ptr argv, int max)
 426 {
 427         int i = 0;
 428 
 429         if (argv.ptr.native != NULL) {
 430                 for (;;) {
 431                         const char __user *p = get_user_arg_ptr(argv, i);
 432 
 433                         if (!p)
 434                                 break;
 435 
 436                         if (IS_ERR(p))
 437                                 return -EFAULT;
 438 
 439                         if (i >= max)
 440                                 return -E2BIG;
 441                         ++i;
 442 
 443                         if (fatal_signal_pending(current))
 444                                 return -ERESTARTNOHAND;
 445                         cond_resched();
 446                 }
 447         }
 448         return i;
 449 }
 450 
 451 static int prepare_arg_pages(struct linux_binprm *bprm,
 452                         struct user_arg_ptr argv, struct user_arg_ptr envp)
 453 {
 454         unsigned long limit, ptr_size;
 455 
 456         bprm->argc = count(argv, MAX_ARG_STRINGS);
 457         if (bprm->argc < 0)
 458                 return bprm->argc;
 459 
 460         bprm->envc = count(envp, MAX_ARG_STRINGS);
 461         if (bprm->envc < 0)
 462                 return bprm->envc;
 463 
 464         /*
 465          * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
 466          * (whichever is smaller) for the argv+env strings.
 467          * This ensures that:
 468          *  - the remaining binfmt code will not run out of stack space,
 469          *  - the program will have a reasonable amount of stack left
 470          *    to work from.
 471          */
 472         limit = _STK_LIM / 4 * 3;
 473         limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
 474         /*
 475          * We've historically supported up to 32 pages (ARG_MAX)
 476          * of argument strings even with small stacks
 477          */
 478         limit = max_t(unsigned long, limit, ARG_MAX);
 479         /*
 480          * We must account for the size of all the argv and envp pointers to
 481          * the argv and envp strings, since they will also take up space in
 482          * the stack. They aren't stored until much later when we can't
 483          * signal to the parent that the child has run out of stack space.
 484          * Instead, calculate it here so it's possible to fail gracefully.
 485          */
 486         ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
 487         if (limit <= ptr_size)
 488                 return -E2BIG;
 489         limit -= ptr_size;
 490 
 491         bprm->argmin = bprm->p - limit;
 492         return 0;
 493 }
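
/*
 * Worked example for the limit computed above (illustrative; assumes
 * 4 KiB pages, a 64-bit kernel and the common 8 MiB RLIMIT_STACK):
 *
 *      limit    = _STK_LIM / 4 * 3        = 6 MiB
 *      limit    = min(6 MiB, 8 MiB / 4)   = 2 MiB
 *      limit    = max(2 MiB, ARG_MAX)     = 2 MiB      (ARG_MAX is 128 KiB)
 *      ptr_size = (argc + envc) * 8       = 800 bytes for 100 entries
 *      argmin   = bprm->p - (2 MiB - 800)
 *
 * copy_strings() below then fails with -E2BIG as soon as bprm->p would
 * drop below argmin.
 */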
 494 
 495 /*
 496  * 'copy_strings()' copies argument/environment strings from the old
 497  * process's memory to the new process's stack.  The call to get_user_pages()
 498  * ensures the destination page is created and not swapped out.
 499  */
 500 static int copy_strings(int argc, struct user_arg_ptr argv,
 501                         struct linux_binprm *bprm)
 502 {
 503         struct page *kmapped_page = NULL;
 504         char *kaddr = NULL;
 505         unsigned long kpos = 0;
 506         int ret;
 507 
 508         while (argc-- > 0) {
 509                 const char __user *str;
 510                 int len;
 511                 unsigned long pos;
 512 
 513                 ret = -EFAULT;
 514                 str = get_user_arg_ptr(argv, argc);
 515                 if (IS_ERR(str))
 516                         goto out;
 517 
 518                 len = strnlen_user(str, MAX_ARG_STRLEN);
 519                 if (!len)
 520                         goto out;
 521 
 522                 ret = -E2BIG;
 523                 if (!valid_arg_len(bprm, len))
 524                         goto out;
 525 
 526                 /* We're going to work our way backwards. */
 527                 pos = bprm->p;
 528                 str += len;
 529                 bprm->p -= len;
 530 #ifdef CONFIG_MMU
 531                 if (bprm->p < bprm->argmin)
 532                         goto out;
 533 #endif
 534 
 535                 while (len > 0) {
 536                         int offset, bytes_to_copy;
 537 
 538                         if (fatal_signal_pending(current)) {
 539                                 ret = -ERESTARTNOHAND;
 540                                 goto out;
 541                         }
 542                         cond_resched();
 543 
 544                         offset = pos % PAGE_SIZE;
 545                         if (offset == 0)
 546                                 offset = PAGE_SIZE;
 547 
 548                         bytes_to_copy = offset;
 549                         if (bytes_to_copy > len)
 550                                 bytes_to_copy = len;
 551 
 552                         offset -= bytes_to_copy;
 553                         pos -= bytes_to_copy;
 554                         str -= bytes_to_copy;
 555                         len -= bytes_to_copy;
 556 
 557                         if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
 558                                 struct page *page;
 559 
 560                                 page = get_arg_page(bprm, pos, 1);
 561                                 if (!page) {
 562                                         ret = -E2BIG;
 563                                         goto out;
 564                                 }
 565 
 566                                 if (kmapped_page) {
 567                                         flush_kernel_dcache_page(kmapped_page);
 568                                         kunmap(kmapped_page);
 569                                         put_arg_page(kmapped_page);
 570                                 }
 571                                 kmapped_page = page;
 572                                 kaddr = kmap(kmapped_page);
 573                                 kpos = pos & PAGE_MASK;
 574                                 flush_arg_page(bprm, kpos, kmapped_page);
 575                         }
 576                         if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
 577                                 ret = -EFAULT;
 578                                 goto out;
 579                         }
 580                 }
 581         }
 582         ret = 0;
 583 out:
 584         if (kmapped_page) {
 585                 flush_kernel_dcache_page(kmapped_page);
 586                 kunmap(kmapped_page);
 587                 put_arg_page(kmapped_page);
 588         }
 589         return ret;
 590 }
 591 
 592 /*
 593  * Like copy_strings, but get argv and its values from kernel memory.
 594  */
 595 int copy_strings_kernel(int argc, const char *const *__argv,
 596                         struct linux_binprm *bprm)
 597 {
 598         int r;
 599         mm_segment_t oldfs = get_fs();
 600         struct user_arg_ptr argv = {
 601                 .ptr.native = (const char __user *const  __user *)__argv,
 602         };
 603 
 604         set_fs(KERNEL_DS);
 605         r = copy_strings(argc, argv, bprm);
 606         set_fs(oldfs);
 607 
 608         return r;
 609 }
 610 EXPORT_SYMBOL(copy_strings_kernel);
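
/*
 * Typical use (sketch): __do_execve_file(), later in this file, pushes
 * the strings in this order - the executable's own name first, then the
 * environment, then the arguments - roughly:
 *
 *      retval = copy_strings_kernel(1, &bprm->filename, bprm);
 *      ...
 *      bprm->exec = bprm->p;
 *      retval = copy_strings(bprm->envc, envp, bprm);
 *      ...
 *      retval = copy_strings(bprm->argc, argv, bprm);
 */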
 611 
 612 #ifdef CONFIG_MMU
 613 
 614 /*
 615  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 616  * the binfmt code determines where the new stack should reside, we shift it to
 617  * its final location.  The process proceeds as follows:
 618  *
 619  * 1) Use shift to calculate the new vma endpoints.
 620  * 2) Extend vma to cover both the old and new ranges.  This ensures the
 621  *    arguments passed to subsequent functions are consistent.
 622  * 3) Move vma's page tables to the new range.
 623  * 4) Free up any cleared pgd range.
 624  * 5) Shrink the vma to cover only the new range.
 625  */
 626 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 627 {
 628         struct mm_struct *mm = vma->vm_mm;
 629         unsigned long old_start = vma->vm_start;
 630         unsigned long old_end = vma->vm_end;
 631         unsigned long length = old_end - old_start;
 632         unsigned long new_start = old_start - shift;
 633         unsigned long new_end = old_end - shift;
 634         struct mmu_gather tlb;
 635 
 636         BUG_ON(new_start > new_end);
 637 
 638         /*
 639          * ensure there are no vmas between where we want to go
 640          * and where we are
 641          */
 642         if (vma != find_vma(mm, new_start))
 643                 return -EFAULT;
 644 
 645         /*
 646          * cover the whole range: [new_start, old_end)
 647          */
 648         if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
 649                 return -ENOMEM;
 650 
 651         /*
 652          * move the page tables downwards, on failure we rely on
 653          * process cleanup to remove whatever mess we made.
 654          */
 655         if (length != move_page_tables(vma, old_start,
 656                                        vma, new_start, length, false))
 657                 return -ENOMEM;
 658 
 659         lru_add_drain();
 660         tlb_gather_mmu(&tlb, mm, old_start, old_end);
 661         if (new_end > old_start) {
 662                 /*
 663                  * when the old and new regions overlap, clear from new_end.
 664                  */
 665                 free_pgd_range(&tlb, new_end, old_end, new_end,
 666                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 667         } else {
 668                 /*
 669                  * otherwise, clean from old_start; this is done to not touch
 670                  * the address space in [new_end, old_start); some architectures
 671                  * have constraints on va-space that make this illegal (IA64) -
 672                  * for the others it's just a little faster.
 673                  */
 674                 free_pgd_range(&tlb, old_start, old_end, new_end,
 675                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
 676         }
 677         tlb_finish_mmu(&tlb, old_start, old_end);
 678 
 679         /*
 680          * Shrink the vma to just the new range.  Always succeeds.
 681          */
 682         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
 683 
 684         return 0;
 685 }
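
/*
 * Note on the overlap test above: new_end > old_start holds exactly when
 * the shift is smaller than the vma's length.  In the usual case of a
 * multi-megabyte shift of a stack vma only a few pages long, the ranges
 * are disjoint and free_pgd_range() starts from old_start instead.
 */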
 686 
 687 /*
 688  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 689  * the stack is optionally relocated, and some extra space is added.
 690  */
 691 int setup_arg_pages(struct linux_binprm *bprm,
 692                     unsigned long stack_top,
 693                     int executable_stack)
 694 {
 695         unsigned long ret;
 696         unsigned long stack_shift;
 697         struct mm_struct *mm = current->mm;
 698         struct vm_area_struct *vma = bprm->vma;
 699         struct vm_area_struct *prev = NULL;
 700         unsigned long vm_flags;
 701         unsigned long stack_base;
 702         unsigned long stack_size;
 703         unsigned long stack_expand;
 704         unsigned long rlim_stack;
 705 
 706 #ifdef CONFIG_STACK_GROWSUP
 707         /* Limit stack size */
 708         stack_base = bprm->rlim_stack.rlim_max;
 709         if (stack_base > STACK_SIZE_MAX)
 710                 stack_base = STACK_SIZE_MAX;
 711 
 712         /* Add space for stack randomization. */
 713         stack_base += (STACK_RND_MASK << PAGE_SHIFT);
 714 
 715         /* Make sure we didn't let the argument array grow too large. */
 716         if (vma->vm_end - vma->vm_start > stack_base)
 717                 return -ENOMEM;
 718 
 719         stack_base = PAGE_ALIGN(stack_top - stack_base);
 720 
 721         stack_shift = vma->vm_start - stack_base;
 722         mm->arg_start = bprm->p - stack_shift;
 723         bprm->p = vma->vm_end - stack_shift;
 724 #else
 725         stack_top = arch_align_stack(stack_top);
 726         stack_top = PAGE_ALIGN(stack_top);
 727 
 728         if (unlikely(stack_top < mmap_min_addr) ||
 729             unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
 730                 return -ENOMEM;
 731 
 732         stack_shift = vma->vm_end - stack_top;
 733 
 734         bprm->p -= stack_shift;
 735         mm->arg_start = bprm->p;
 736 #endif
 737 
 738         if (bprm->loader)
 739                 bprm->loader -= stack_shift;
 740         bprm->exec -= stack_shift;
 741 
 742         if (down_write_killable(&mm->mmap_sem))
 743                 return -EINTR;
 744 
 745         vm_flags = VM_STACK_FLAGS;
 746 
 747         /*
 748          * Adjust stack execute permissions; explicitly enable for
 749          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
 750          * (arch default) otherwise.
 751          */
 752         if (unlikely(executable_stack == EXSTACK_ENABLE_X))
 753                 vm_flags |= VM_EXEC;
 754         else if (executable_stack == EXSTACK_DISABLE_X)
 755                 vm_flags &= ~VM_EXEC;
 756         vm_flags |= mm->def_flags;
 757         vm_flags |= VM_STACK_INCOMPLETE_SETUP;
 758 
 759         ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
 760                         vm_flags);
 761         if (ret)
 762                 goto out_unlock;
 763         BUG_ON(prev != vma);
 764 
 765         /* Move stack pages down in memory. */
 766         if (stack_shift) {
 767                 ret = shift_arg_pages(vma, stack_shift);
 768                 if (ret)
 769                         goto out_unlock;
 770         }
 771 
 772         /* mprotect_fixup is overkill to remove the temporary stack flags */
 773         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
 774 
 775         stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
 776         stack_size = vma->vm_end - vma->vm_start;
 777         /*
 778          * Align this down to a page boundary as expand_stack
 779          * will align it up.
 780          */
 781         rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
 782 #ifdef CONFIG_STACK_GROWSUP
 783         if (stack_size + stack_expand > rlim_stack)
 784                 stack_base = vma->vm_start + rlim_stack;
 785         else
 786                 stack_base = vma->vm_end + stack_expand;
 787 #else
 788         if (stack_size + stack_expand > rlim_stack)
 789                 stack_base = vma->vm_end - rlim_stack;
 790         else
 791                 stack_base = vma->vm_start - stack_expand;
 792 #endif
 793         current->mm->start_stack = bprm->p;
 794         ret = expand_stack(vma, stack_base);
 795         if (ret)
 796                 ret = -EFAULT;
 797 
 798 out_unlock:
 799         up_write(&mm->mmap_sem);
 800         return ret;
 801 }
 802 EXPORT_SYMBOL(setup_arg_pages);
 803 
 804 #else
 805 
 806 /*
 807  * Transfer the program arguments and environment from the holding pages
 808  * onto the stack. The provided stack pointer is adjusted accordingly.
 809  */
 810 int transfer_args_to_stack(struct linux_binprm *bprm,
 811                            unsigned long *sp_location)
 812 {
 813         unsigned long index, stop, sp;
 814         int ret = 0;
 815 
 816         stop = bprm->p >> PAGE_SHIFT;
 817         sp = *sp_location;
 818 
 819         for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
 820                 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
 821                 char *src = kmap(bprm->page[index]) + offset;
 822                 sp -= PAGE_SIZE - offset;
 823                 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
 824                         ret = -EFAULT;
 825                 kunmap(bprm->page[index]);
 826                 if (ret)
 827                         goto out;
 828         }
 829 
 830         *sp_location = sp;
 831 
 832 out:
 833         return ret;
 834 }
 835 EXPORT_SYMBOL(transfer_args_to_stack);
 836 
 837 #endif /* CONFIG_MMU */
 838 
 839 static struct file *do_open_execat(int fd, struct filename *name, int flags)
 840 {
 841         struct file *file;
 842         int err;
 843         struct open_flags open_exec_flags = {
 844                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
 845                 .acc_mode = MAY_EXEC,
 846                 .intent = LOOKUP_OPEN,
 847                 .lookup_flags = LOOKUP_FOLLOW,
 848         };
 849 
 850         if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
 851                 return ERR_PTR(-EINVAL);
 852         if (flags & AT_SYMLINK_NOFOLLOW)
 853                 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
 854         if (flags & AT_EMPTY_PATH)
 855                 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
 856 
 857         file = do_filp_open(fd, name, &open_exec_flags);
 858         if (IS_ERR(file))
 859                 goto out;
 860 
 861         err = -EACCES;
 862         if (!S_ISREG(file_inode(file)->i_mode))
 863                 goto exit;
 864 
 865         if (path_noexec(&file->f_path))
 866                 goto exit;
 867 
 868         err = deny_write_access(file);
 869         if (err)
 870                 goto exit;
 871 
 872         if (name->name[0] != '\0')
 873                 fsnotify_open(file);
 874 
 875 out:
 876         return file;
 877 
 878 exit:
 879         fput(file);
 880         return ERR_PTR(err);
 881 }
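
/*
 * Userspace view (illustrative, not part of this file): the flags
 * validated above arrive from execveat(2).  A minimal caller that execs
 * an already-opened descriptor through AT_EMPTY_PATH might look like:
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        char *argv[] = { "true", NULL };
        char *envp[] = { NULL };
        int fd = open("/bin/true", O_PATH | O_CLOEXEC);

        if (fd < 0)
                return 1;
        /* Empty pathname + AT_EMPTY_PATH: execute the file behind fd. */
        syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
        return 1;               /* reached only if execveat() failed */
}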
 882 
 883 struct file *open_exec(const char *name)
 884 {
 885         struct filename *filename = getname_kernel(name);
 886         struct file *f = ERR_CAST(filename);
 887 
 888         if (!IS_ERR(filename)) {
 889                 f = do_open_execat(AT_FDCWD, filename, 0);
 890                 putname(filename);
 891         }
 892         return f;
 893 }
 894 EXPORT_SYMBOL(open_exec);
 895 
 896 int kernel_read_file(struct file *file, void **buf, loff_t *size,
 897                      loff_t max_size, enum kernel_read_file_id id)
 898 {
 899         loff_t i_size, pos;
 900         ssize_t bytes = 0;
 901         int ret;
 902 
 903         if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
 904                 return -EINVAL;
 905 
 906         ret = deny_write_access(file);
 907         if (ret)
 908                 return ret;
 909 
 910         ret = security_kernel_read_file(file, id);
 911         if (ret)
 912                 goto out;
 913 
 914         i_size = i_size_read(file_inode(file));
 915         if (i_size <= 0) {
 916                 ret = -EINVAL;
 917                 goto out;
 918         }
 919         if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
 920                 ret = -EFBIG;
 921                 goto out;
 922         }
 923 
 924         if (id != READING_FIRMWARE_PREALLOC_BUFFER)
 925                 *buf = vmalloc(i_size);
 926         if (!*buf) {
 927                 ret = -ENOMEM;
 928                 goto out;
 929         }
 930 
 931         pos = 0;
 932         while (pos < i_size) {
 933                 bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
 934                 if (bytes < 0) {
 935                         ret = bytes;
 936                         goto out_free;
 937                 }
 938 
 939                 if (bytes == 0)
 940                         break;
 941         }
 942 
 943         if (pos != i_size) {
 944                 ret = -EIO;
 945                 goto out_free;
 946         }
 947 
 948         ret = security_kernel_post_read_file(file, *buf, i_size, id);
 949         if (!ret)
 950                 *size = pos;
 951 
 952 out_free:
 953         if (ret < 0) {
 954                 if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
 955                         vfree(*buf);
 956                         *buf = NULL;
 957                 }
 958         }
 959 
 960 out:
 961         allow_write_access(file);
 962         return ret;
 963 }
 964 EXPORT_SYMBOL_GPL(kernel_read_file);
 965 
 966 int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
 967                                loff_t max_size, enum kernel_read_file_id id)
 968 {
 969         struct file *file;
 970         int ret;
 971 
 972         if (!path || !*path)
 973                 return -EINVAL;
 974 
 975         file = filp_open(path, O_RDONLY, 0);
 976         if (IS_ERR(file))
 977                 return PTR_ERR(file);
 978 
 979         ret = kernel_read_file(file, buf, size, max_size, id);
 980         fput(file);
 981         return ret;
 982 }
 983 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
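
/*
 * Illustrative caller (sketch): pulling a whole file into a vmalloc()
 * buffer with a 1 MiB cap, much as the firmware loader does.  The path
 * and the helper name are hypothetical; error handling is trimmed.
 */

#include <linux/fs.h>
#include <linux/vmalloc.h>

static int example_load_blob(void)
{
        void *buf = NULL;
        loff_t size = 0;
        int err;

        err = kernel_read_file_from_path("/lib/firmware/example.bin",
                                         &buf, &size, 1024 * 1024,
                                         READING_FIRMWARE);
        if (err)
                return err;
        /* ... consume size bytes at buf ... */
        vfree(buf);
        return 0;
}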
 984 
 985 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
 986                              enum kernel_read_file_id id)
 987 {
 988         struct fd f = fdget(fd);
 989         int ret = -EBADF;
 990 
 991         if (!f.file)
 992                 goto out;
 993 
 994         ret = kernel_read_file(f.file, buf, size, max_size, id);
 995 out:
 996         fdput(f);
 997         return ret;
 998 }
 999 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
1000 
1001 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
1002 {
1003         ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
1004         if (res > 0)
1005                 flush_icache_range(addr, addr + len);
1006         return res;
1007 }
1008 EXPORT_SYMBOL(read_code);
1009 
1010 static int exec_mmap(struct mm_struct *mm)
1011 {
1012         struct task_struct *tsk;
1013         struct mm_struct *old_mm, *active_mm;
1014 
1015         /* Notify parent that we're no longer interested in the old VM */
1016         tsk = current;
1017         old_mm = current->mm;
1018         exec_mm_release(tsk, old_mm);
1019 
1020         if (old_mm) {
1021                 sync_mm_rss(old_mm);
1022                 /*
1023                  * Make sure that if there is a core dump in progress
1024                  * for the old mm, we get out and die instead of going
1025                  * through with the exec.  We must hold mmap_sem around
1026                  * checking core_state and changing tsk->mm.
1027                  */
1028                 down_read(&old_mm->mmap_sem);
1029                 if (unlikely(old_mm->core_state)) {
1030                         up_read(&old_mm->mmap_sem);
1031                         return -EINTR;
1032                 }
1033         }
1034         task_lock(tsk);
1035         active_mm = tsk->active_mm;
1036         membarrier_exec_mmap(mm);
1037         tsk->mm = mm;
1038         tsk->active_mm = mm;
1039         activate_mm(active_mm, mm);
1040         tsk->mm->vmacache_seqnum = 0;
1041         vmacache_flush(tsk);
1042         task_unlock(tsk);
1043         if (old_mm) {
1044                 up_read(&old_mm->mmap_sem);
1045                 BUG_ON(active_mm != old_mm);
1046                 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1047                 mm_update_next_owner(old_mm);
1048                 mmput(old_mm);
1049                 return 0;
1050         }
1051         mmdrop(active_mm);
1052         return 0;
1053 }
1054 
1055 /*
1056  * This function makes sure the current process has its own signal table,
1057  * so that flush_signal_handlers can later reset the handlers without
1058  * disturbing other processes.  (Other processes might share the signal
1059  * table via the CLONE_SIGHAND option to clone().)
1060  */
1061 static int de_thread(struct task_struct *tsk)
1062 {
1063         struct signal_struct *sig = tsk->signal;
1064         struct sighand_struct *oldsighand = tsk->sighand;
1065         spinlock_t *lock = &oldsighand->siglock;
1066 
1067         if (thread_group_empty(tsk))
1068                 goto no_thread_group;
1069 
1070         /*
1071          * Kill all other threads in the thread group.
1072          */
1073         spin_lock_irq(lock);
1074         if (signal_group_exit(sig)) {
1075                 /*
1076                  * Another group action in progress, just
1077                  * return so that the signal is processed.
1078                  */
1079                 spin_unlock_irq(lock);
1080                 return -EAGAIN;
1081         }
1082 
1083         sig->group_exit_task = tsk;
1084         sig->notify_count = zap_other_threads(tsk);
1085         if (!thread_group_leader(tsk))
1086                 sig->notify_count--;
1087 
1088         while (sig->notify_count) {
1089                 __set_current_state(TASK_KILLABLE);
1090                 spin_unlock_irq(lock);
1091                 schedule();
1092                 if (__fatal_signal_pending(tsk))
1093                         goto killed;
1094                 spin_lock_irq(lock);
1095         }
1096         spin_unlock_irq(lock);
1097 
1098         /*
1099          * At this point all other threads have exited, all we have to
1100          * do is to wait for the thread group leader to become inactive,
1101          * and to assume its PID:
1102          */
1103         if (!thread_group_leader(tsk)) {
1104                 struct task_struct *leader = tsk->group_leader;
1105 
1106                 for (;;) {
1107                         cgroup_threadgroup_change_begin(tsk);
1108                         write_lock_irq(&tasklist_lock);
1109                         /*
1110                          * Do this under tasklist_lock to ensure that
1111                          * exit_notify() can't miss ->group_exit_task
1112                          */
1113                         sig->notify_count = -1;
1114                         if (likely(leader->exit_state))
1115                                 break;
1116                         __set_current_state(TASK_KILLABLE);
1117                         write_unlock_irq(&tasklist_lock);
1118                         cgroup_threadgroup_change_end(tsk);
1119                         schedule();
1120                         if (__fatal_signal_pending(tsk))
1121                                 goto killed;
1122                 }
1123 
1124                 /*
1125                  * The only record we have of the real-time age of a
1126                  * process, regardless of execs it's done, is start_time.
1127                  * All the past CPU time is accumulated in signal_struct
1128                  * from sister threads now dead.  But in this non-leader
1129                  * exec, nothing survives from the original leader thread,
1130                  * whose birth marks the true age of this process now.
1131                  * When we take on its identity by switching to its PID, we
1132                  * also take its birthdate (always earlier than our own).
1133                  */
1134                 tsk->start_time = leader->start_time;
1135                 tsk->real_start_time = leader->real_start_time;
1136 
1137                 BUG_ON(!same_thread_group(leader, tsk));
1138                 BUG_ON(has_group_leader_pid(tsk));
1139                 /*
1140                  * An exec() starts a new thread group with the
1141                  * TGID of the previous thread group. Rehash the
1142                  * two threads with a switched PID, and release
1143                  * the former thread group leader:
1144                  */
1145 
1146                 /* Become a process group leader with the old leader's pid.
1147                  * The old leader becomes a thread of this thread group.
1148                  * Note: The old leader also uses this pid until release_task
1149                  *       is called.  Odd but simple and correct.
1150                  */
1151                 tsk->pid = leader->pid;
1152                 change_pid(tsk, PIDTYPE_PID, task_pid(leader));
1153                 transfer_pid(leader, tsk, PIDTYPE_TGID);
1154                 transfer_pid(leader, tsk, PIDTYPE_PGID);
1155                 transfer_pid(leader, tsk, PIDTYPE_SID);
1156 
1157                 list_replace_rcu(&leader->tasks, &tsk->tasks);
1158                 list_replace_init(&leader->sibling, &tsk->sibling);
1159 
1160                 tsk->group_leader = tsk;
1161                 leader->group_leader = tsk;
1162 
1163                 tsk->exit_signal = SIGCHLD;
1164                 leader->exit_signal = -1;
1165 
1166                 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1167                 leader->exit_state = EXIT_DEAD;
1168 
1169                 /*
1170                  * We are going to release_task()->ptrace_unlink() silently,
1171                  * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1172          * the tracer won't block again waiting for this thread.
1173                  */
1174                 if (unlikely(leader->ptrace))
1175                         __wake_up_parent(leader, leader->parent);
1176                 write_unlock_irq(&tasklist_lock);
1177                 cgroup_threadgroup_change_end(tsk);
1178 
1179                 release_task(leader);
1180         }
1181 
1182         sig->group_exit_task = NULL;
1183         sig->notify_count = 0;
1184 
1185 no_thread_group:
1186         /* we have changed execution domain */
1187         tsk->exit_signal = SIGCHLD;
1188 
1189 #ifdef CONFIG_POSIX_TIMERS
1190         exit_itimers(sig);
1191         flush_itimer_signals();
1192 #endif
1193 
1194         if (refcount_read(&oldsighand->count) != 1) {
1195                 struct sighand_struct *newsighand;
1196                 /*
1197                  * This ->sighand is shared with the CLONE_SIGHAND
1198                  * but not CLONE_THREAD task, switch to the new one.
1199                  */
1200                 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1201                 if (!newsighand)
1202                         return -ENOMEM;
1203 
1204                 refcount_set(&newsighand->count, 1);
1205                 memcpy(newsighand->action, oldsighand->action,
1206                        sizeof(newsighand->action));
1207 
1208                 write_lock_irq(&tasklist_lock);
1209                 spin_lock(&oldsighand->siglock);
1210                 rcu_assign_pointer(tsk->sighand, newsighand);
1211                 spin_unlock(&oldsighand->siglock);
1212                 write_unlock_irq(&tasklist_lock);
1213 
1214                 __cleanup_sighand(oldsighand);
1215         }
1216 
1217         BUG_ON(!thread_group_leader(tsk));
1218         return 0;
1219 
1220 killed:
1221         /* protects against exit_notify() and __exit_signal() */
1222         read_lock(&tasklist_lock);
1223         sig->group_exit_task = NULL;
1224         sig->notify_count = 0;
1225         read_unlock(&tasklist_lock);
1226         return -EAGAIN;
1227 }
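
/*
 * Userspace illustration (not part of this file) of the PID takeover
 * described above: when a non-leader thread execs, the surviving thread
 * assumes the leader's PID, so the new image still reports the original
 * process's PID.
 */

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *exec_from_thread(void *arg)
{
        char *argv[] = { "sh", "-c", "echo execed as pid $$", NULL };

        execv("/bin/sh", argv);
        return NULL;            /* reached only if execv() failed */
}

int main(void)
{
        pthread_t t;

        printf("leader pid %d\n", (int)getpid());
        fflush(stdout);
        pthread_create(&t, NULL, exec_from_thread, NULL);
        pthread_join(&t, NULL); /* never returns once the exec succeeds */
        return 0;
}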
1228 
1229 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1230 {
1231         task_lock(tsk);
1232         strncpy(buf, tsk->comm, buf_size);
1233         task_unlock(tsk);
1234         return buf;
1235 }
1236 EXPORT_SYMBOL_GPL(__get_task_comm);
1237 
1238 /*
1239  * These functions flush out all traces of the currently running executable
1240  * so that a new one can be started.
1241  */
1242 
1243 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1244 {
1245         task_lock(tsk);
1246         trace_task_rename(tsk, buf);
1247         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1248         task_unlock(tsk);
1249         perf_event_comm(tsk, exec);
1250 }
1251 
1252 /*
1253  * Calling this is the point of no return. None of the failures will be
1254  * seen by userspace since either the process is already taking a fatal
1255  * signal (via de_thread() or coredump), or will have SEGV raised
1256  * (after exec_mmap()) by search_binary_handler() (see below).
1257  */
1258 int flush_old_exec(struct linux_binprm * bprm)
1259 {
1260         int retval;
1261 
1262         /*
1263          * Make sure we have a private signal table and that
1264          * we are unassociated from the previous thread group.
1265          */
1266         retval = de_thread(current);
1267         if (retval)
1268                 goto out;
1269 
1270         /*
1271          * Must be called _before_ exec_mmap() as bprm->mm is
1272          * not visible until then. This also enables the update
1273          * to be lockless.
1274          */
1275         set_mm_exe_file(bprm->mm, bprm->file);
1276 
1277         would_dump(bprm, bprm->file);
1278 
1279         /*
1280          * Release all of the old mmap stuff
1281          */
1282         acct_arg_size(bprm, 0);
1283         retval = exec_mmap(bprm->mm);
1284         if (retval)
1285                 goto out;
1286 
1287         /*
1288          * After clearing bprm->mm (to mark that current is using the
1289          * prepared mm now), we have nothing left of the original
1290          * process. If anything from here on returns an error, the check
1291          * in search_binary_handler() will SEGV current.
1292          */
1293         bprm->mm = NULL;
1294 
1295         set_fs(USER_DS);
1296         current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1297                                         PF_NOFREEZE | PF_NO_SETAFFINITY);
1298         flush_thread();
1299         current->personality &= ~bprm->per_clear;
1300 
1301         /*
1302          * We have to apply CLOEXEC before we change whether the process is
1303          * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1304          * trying to access the should-be-closed file descriptors of a process
1305          * undergoing exec(2).
1306          */
1307         do_close_on_exec(current->files);
1308         return 0;
1309 
1310 out:
1311         return retval;
1312 }
1313 EXPORT_SYMBOL(flush_old_exec);
1314 
1315 void would_dump(struct linux_binprm *bprm, struct file *file)
1316 {
1317         struct inode *inode = file_inode(file);
1318         if (inode_permission(inode, MAY_READ) < 0) {
1319                 struct user_namespace *old, *user_ns;
1320                 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1321 
1322                 /* Ensure mm->user_ns contains the executable */
1323                 user_ns = old = bprm->mm->user_ns;
1324                 while ((user_ns != &init_user_ns) &&
1325                        !privileged_wrt_inode_uidgid(user_ns, inode))
1326                         user_ns = user_ns->parent;
1327 
1328                 if (old != user_ns) {
1329                         bprm->mm->user_ns = get_user_ns(user_ns);
1330                         put_user_ns(old);
1331                 }
1332         }
1333 }
1334 EXPORT_SYMBOL(would_dump);
1335 
1336 void setup_new_exec(struct linux_binprm * bprm)
1337 {
1338         /*
1339          * Once here, prepare_binprm() will not be called any more, so
1340          * the final state of setuid/setgid/fscaps can be merged into the
1341          * secureexec flag.
1342          */
1343         bprm->secureexec |= bprm->cap_elevated;
1344 
1345         if (bprm->secureexec) {
1346                 /* Make sure parent cannot signal privileged process. */
1347                 current->pdeath_signal = 0;
1348 
1349                 /*
1350                  * For secureexec, reset the stack limit to sane default to
1351                  * avoid bad behavior from the prior rlimits. This has to
1352                  * happen before arch_pick_mmap_layout(), which examines
1353                  * RLIMIT_STACK, but after the point of no return to avoid
1354                  * needing to clean up the change on failure.
1355                  */
1356                 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1357                         bprm->rlim_stack.rlim_cur = _STK_LIM;
1358         }
1359 
1360         arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
1361 
1362         current->sas_ss_sp = current->sas_ss_size = 0;
1363 
1364         /*
1365          * Figure out dumpability. Note that checking only current here
1366          * is wrong, but userspace depends on it. This should be testing
1367          * bprm->secureexec instead.
1368          */
1369         if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1370             !(uid_eq(current_euid(), current_uid()) &&
1371               gid_eq(current_egid(), current_gid())))
1372                 set_dumpable(current->mm, suid_dumpable);
1373         else
1374                 set_dumpable(current->mm, SUID_DUMP_USER);
1375 
1376         arch_setup_new_exec();
1377         perf_event_exec();
1378         __set_task_comm(current, kbasename(bprm->filename), true);
1379 
1380         /* Set the new mm task size. We have to do that late because it may
1381          * depend on TIF_32BIT which is only updated in flush_thread() on
1382          * some architectures like powerpc
1383          */
1384         current->mm->task_size = TASK_SIZE;
1385 
1386         /* An exec changes our domain. We are no longer part of the thread
1387            group */
1388         WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
1389         flush_signal_handlers(current, 0);
1390 }
1391 EXPORT_SYMBOL(setup_new_exec);
1392 
1393 /* Runs immediately before start_thread() takes over. */
1394 void finalize_exec(struct linux_binprm *bprm)
1395 {
1396         /* Store any stack rlimit changes before starting thread. */
1397         task_lock(current->group_leader);
1398         current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1399         task_unlock(current->group_leader);
1400 }
1401 EXPORT_SYMBOL(finalize_exec);
1402 
1403 /*
1404  * Prepare credentials and lock ->cred_guard_mutex.
1405  * install_exec_creds() commits the new creds and drops the lock.
1406  * Or, if exec fails before, free_bprm() should release ->cred
1407  * and unlock.
1408  */
1409 static int prepare_bprm_creds(struct linux_binprm *bprm)
1410 {
1411         if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1412                 return -ERESTARTNOINTR;
1413 
1414         bprm->cred = prepare_exec_creds();
1415         if (likely(bprm->cred))
1416                 return 0;
1417 
1418         mutex_unlock(&current->signal->cred_guard_mutex);
1419         return -ENOMEM;
1420 }
1421 
1422 static void free_bprm(struct linux_binprm *bprm)
1423 {
1424         free_arg_pages(bprm);
1425         if (bprm->cred) {
1426                 mutex_unlock(&current->signal->cred_guard_mutex);
1427                 abort_creds(bprm->cred);
1428         }
1429         if (bprm->file) {
1430                 allow_write_access(bprm->file);
1431                 fput(bprm->file);
1432         }
1433         /* If a binfmt changed the interp, free it. */
1434         if (bprm->interp != bprm->filename)
1435                 kfree(bprm->interp);
1436         kfree(bprm);
1437 }
1438 
1439 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1440 {
1441         /* If a binfmt changed the interp, free it first. */
1442         if (bprm->interp != bprm->filename)
1443                 kfree(bprm->interp);
1444         bprm->interp = kstrdup(interp, GFP_KERNEL);
1445         if (!bprm->interp)
1446                 return -ENOMEM;
1447         return 0;
1448 }
1449 EXPORT_SYMBOL(bprm_change_interp);
1450 
1451 /*
1452  * install the new credentials for this executable
1453  */
1454 void install_exec_creds(struct linux_binprm *bprm)
1455 {
1456         security_bprm_committing_creds(bprm);
1457 
1458         commit_creds(bprm->cred);
1459         bprm->cred = NULL;
1460 
1461         /*
1462          * Disable monitoring for regular users
1463          * when executing setuid binaries. Must
1464          * wait until new credentials are committed
1465          * by commit_creds() above
1466          */
1467         if (get_dumpable(current->mm) != SUID_DUMP_USER)
1468                 perf_event_exit_task(current);
1469         /*
1470          * cred_guard_mutex must be held at least to this point to prevent
1471          * ptrace_attach() from altering our determination of the task's
1472          * credentials; any time after this it may be unlocked.
1473          */
1474         security_bprm_committed_creds(bprm);
1475         mutex_unlock(&current->signal->cred_guard_mutex);
1476 }
1477 EXPORT_SYMBOL(install_exec_creds);
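
/*
 * Lifetime of bprm->cred and ->cred_guard_mutex across the helpers
 * above (sketch):
 *
 *      prepare_bprm_creds()    lock mutex, allocate bprm->cred
 *        on success of the whole exec:
 *      install_exec_creds()    commit_creds(bprm->cred), unlock mutex
 *        on failure before that point:
 *      free_bprm()             abort_creds(bprm->cred), unlock mutex
 */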
1478 
1479 /*
1480  * determine how safe it is to execute the proposed program
1481  * - the caller must hold ->cred_guard_mutex to protect against
1482  *   PTRACE_ATTACH or seccomp thread-sync
1483  */
1484 static void check_unsafe_exec(struct linux_binprm *bprm)
1485 {
1486         struct task_struct *p = current, *t;
1487         unsigned n_fs;
1488 
1489         if (p->ptrace)
1490                 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1491 
1492         /*
1493          * This isn't strictly necessary, but it makes it harder for LSMs to
1494          * mess up.
1495          */
1496         if (task_no_new_privs(current))
1497                 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1498 
1499         t = p;
1500         n_fs = 1;
1501         spin_lock(&p->fs->lock);
1502         rcu_read_lock();
1503         while_each_thread(p, t) {
1504                 if (t->fs == p->fs)
1505                         n_fs++;
1506         }
1507         rcu_read_unlock();
1508 
1509         if (p->fs->users > n_fs)
1510                 bprm->unsafe |= LSM_UNSAFE_SHARE;
1511         else
1512                 p->fs->in_exec = 1;
1513         spin_unlock(&p->fs->lock);
1514 }
1515 
1516 static void bprm_fill_uid(struct linux_binprm *bprm)
1517 {
1518         struct inode *inode;
1519         unsigned int mode;
1520         kuid_t uid;
1521         kgid_t gid;
1522 
1523         /*
1524          * Since this can be called multiple times (via prepare_binprm),
1525          * we must clear any previous work done when setting set[ug]id
1526          * bits from any earlier bprm->file uses (for example when run
1527          * first for a setuid script then again for its interpreter).
1528          */
1529         bprm->cred->euid = current_euid();
1530         bprm->cred->egid = current_egid();
1531 
1532         if (!mnt_may_suid(bprm->file->f_path.mnt))
1533                 return;
1534 
1535         if (task_no_new_privs(current))
1536                 return;
1537 
1538         inode = bprm->file->f_path.dentry->d_inode;
1539         mode = READ_ONCE(inode->i_mode);
1540         if (!(mode & (S_ISUID|S_ISGID)))
1541                 return;
1542 
1543         /* Be careful if suid/sgid is set */
1544         inode_lock(inode);
1545 
1546         /* Atomically reload mode/uid/gid now that the lock is held */
1547         mode = inode->i_mode;
1548         uid = inode->i_uid;
1549         gid = inode->i_gid;
1550         inode_unlock(inode);
1551 
1552         /* We ignore suid/sgid if there are no mappings for them in the ns */
1553         if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1554                  !kgid_has_mapping(bprm->cred->user_ns, gid))
1555                 return;
1556 
1557         if (mode & S_ISUID) {
1558                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1559                 bprm->cred->euid = uid;
1560         }
1561 
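        /*
         * Require both SGID and group-execute: SGID without group-execute
         * historically marks a file for mandatory locking rather than
         * setgid-on-exec.
         */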
1562         if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1563                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1564                 bprm->cred->egid = gid;
1565         }
1566 }
1567 
1568 /*
1569  * Fill the binprm structure from the inode.
1570  * Check permissions, then read the first BINPRM_BUF_SIZE bytes
1571  *
1572  * This may be called multiple times for binary chains (scripts for example).
1573  */
1574 int prepare_binprm(struct linux_binprm *bprm)
1575 {
1576         int retval;
1577         loff_t pos = 0;
1578 
1579         bprm_fill_uid(bprm);
1580 
1581         /* fill in binprm security blob */
1582         retval = security_bprm_set_creds(bprm);
1583         if (retval)
1584                 return retval;
1585         bprm->called_set_creds = 1;
1586 
1587         memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1588         return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1589 }
1590 
1591 EXPORT_SYMBOL(prepare_binprm);
1592 
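/*
 * For illustration: after prepare_binprm(), bprm->buf holds the start of
 * the image that each handler's load_binary callback inspects, e.g.
 * "\177ELF..." for an ELF binary or "#!/bin/sh" for a script handled by
 * binfmt_script.
 */
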
1593 /*
1594  * Arguments are '\0' separated strings found at the location bprm->p
1595  * points to; chop off the first by relocating bprm->p to right after
1596  * the first '\0' encountered.
1597  */
1598 int remove_arg_zero(struct linux_binprm *bprm)
1599 {
1600         int ret = 0;
1601         unsigned long offset;
1602         char *kaddr;
1603         struct page *page;
1604 
1605         if (!bprm->argc)
1606                 return 0;
1607 
1608         do {
1609                 offset = bprm->p & ~PAGE_MASK;
1610                 page = get_arg_page(bprm, bprm->p, 0);
1611                 if (!page) {
1612                         ret = -EFAULT;
1613                         goto out;
1614                 }
1615                 kaddr = kmap_atomic(page);
1616 
1617                 for (; offset < PAGE_SIZE && kaddr[offset];
1618                                 offset++, bprm->p++)
1619                         ;
1620 
1621                 kunmap_atomic(kaddr);
1622                 put_arg_page(page);
1623         } while (offset == PAGE_SIZE);
1624 
1625         bprm->p++;
1626         bprm->argc--;
1627         ret = 0;
1628 
1629 out:
1630         return ret;
1631 }
1632 EXPORT_SYMBOL(remove_arg_zero);
1633 
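/*
 * Example (illustrative values): for an execve of "./wrapper.sh -x foo",
 * the strings at bprm->p read "./wrapper.sh\0-x\0foo\0" and argc == 3.
 * After remove_arg_zero(), bprm->p points at "-x" and argc == 2, so a
 * script handler can then push the interpreter path as the new argv[0].
 */
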
1634 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1635 /*
1636  * cycle through the list of binary format handlers until one recognizes the image
1637  */
1638 int search_binary_handler(struct linux_binprm *bprm)
1639 {
1640         bool need_retry = IS_ENABLED(CONFIG_MODULES);
1641         struct linux_binfmt *fmt;
1642         int retval;
1643 
1644         /* This allows up to 5 levels of binfmt rewrites before failing hard. */
1645         if (bprm->recursion_depth > 5)
1646                 return -ELOOP;
1647 
1648         retval = security_bprm_check(bprm);
1649         if (retval)
1650                 return retval;
1651 
1652         retval = -ENOENT;
1653  retry:
1654         read_lock(&binfmt_lock);
1655         list_for_each_entry(fmt, &formats, lh) {
1656                 if (!try_module_get(fmt->module))
1657                         continue;
1658                 read_unlock(&binfmt_lock);
1659 
1660                 bprm->recursion_depth++;
1661                 retval = fmt->load_binary(bprm);
1662                 bprm->recursion_depth--;
1663 
1664                 read_lock(&binfmt_lock);
1665                 put_binfmt(fmt);
1666                 if (retval < 0 && !bprm->mm) {
1667                         /* we got to flush_old_exec() and failed after it */
1668                         read_unlock(&binfmt_lock);
1669                         force_sigsegv(SIGSEGV);
1670                         return retval;
1671                 }
1672                 if (retval != -ENOEXEC || !bprm->file) {
1673                         read_unlock(&binfmt_lock);
1674                         return retval;
1675                 }
1676         }
1677         read_unlock(&binfmt_lock);
1678 
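        /*
         * No handler claimed the image.  As a last resort, try loading a
         * binfmt module keyed on the two bytes at offset 2 of the header
         * ("binfmt-%04x"), but only once, and only when the header does
         * not look like printable text; a text file that no handler
         * accepted will not be helped by loading a module.
         */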
1679         if (need_retry) {
1680                 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1681                     printable(bprm->buf[2]) && printable(bprm->buf[3]))
1682                         return retval;
1683                 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1684                         return retval;
1685                 need_retry = false;
1686                 goto retry;
1687         }
1688 
1689         return retval;
1690 }
1691 EXPORT_SYMBOL(search_binary_handler);
1692 
1693 static int exec_binprm(struct linux_binprm *bprm)
1694 {
1695         pid_t old_pid, old_vpid;
1696         int ret;
1697 
1698         /* Need to fetch pid before load_binary changes it */
1699         old_pid = current->pid;
1700         rcu_read_lock();
1701         old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1702         rcu_read_unlock();
1703 
1704         ret = search_binary_handler(bprm);
1705         if (ret >= 0) {
1706                 audit_bprm(bprm);
1707                 trace_sched_process_exec(current, old_pid, bprm);
1708                 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1709                 proc_exec_connector(current);
1710         }
1711 
1712         return ret;
1713 }
1714 
1715 /*
1716  * sys_execve() executes a new program.
1717  */
1718 static int __do_execve_file(int fd, struct filename *filename,
1719                             struct user_arg_ptr argv,
1720                             struct user_arg_ptr envp,
1721                             int flags, struct file *file)
1722 {
1723         char *pathbuf = NULL;
1724         struct linux_binprm *bprm;
1725         struct files_struct *displaced;
1726         int retval;
1727 
1728         if (IS_ERR(filename))
1729                 return PTR_ERR(filename);
1730 
1731         /*
1732          * We move the actual failure in case of RLIMIT_NPROC excess from
1733          * set*uid() to execve() because too many poorly written programs
1734          * don't check setuid() return code.  Here we additionally recheck
1735          * whether NPROC limit is still exceeded.
1736          */
1737         if ((current->flags & PF_NPROC_EXCEEDED) &&
1738             atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1739                 retval = -EAGAIN;
1740                 goto out_ret;
1741         }
1742 
1743         /* We're below the limit (still or again), so we don't want to make
1744          * further execve() calls fail. */
1745         current->flags &= ~PF_NPROC_EXCEEDED;
1746 
1747         retval = unshare_files(&displaced);
1748         if (retval)
1749                 goto out_ret;
1750 
1751         retval = -ENOMEM;
1752         bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1753         if (!bprm)
1754                 goto out_files;
1755 
1756         retval = prepare_bprm_creds(bprm);
1757         if (retval)
1758                 goto out_free;
1759 
1760         check_unsafe_exec(bprm);
1761         current->in_execve = 1;
1762 
1763         if (!file)
1764                 file = do_open_execat(fd, filename, flags);
1765         retval = PTR_ERR(file);
1766         if (IS_ERR(file))
1767                 goto out_unmark;
1768 
1769         sched_exec();
1770 
1771         bprm->file = file;
1772         if (!filename) {
1773                 bprm->filename = "none";
1774         } else if (fd == AT_FDCWD || filename->name[0] == '/') {
1775                 bprm->filename = filename->name;
1776         } else {
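                /*
                 * No absolute path was given: build a /dev/fd based name
                 * so the new program still sees a usable pathname.  An
                 * empty name here means execveat(fd, "", ..., AT_EMPTY_PATH),
                 * i.e. exec of the fd itself.
                 */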
1777                 if (filename->name[0] == '\0')
1778                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1779                 else
1780                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1781                                             fd, filename->name);
1782                 if (!pathbuf) {
1783                         retval = -ENOMEM;
1784                         goto out_unmark;
1785                 }
1786                 /*
1787                  * Record that a name derived from an O_CLOEXEC fd will be
1788                  * inaccessible after exec. Relies on having exclusive access to
1789                  * current->files (due to unshare_files above).
1790                  */
1791                 if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1792                         bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1793                 bprm->filename = pathbuf;
1794         }
1795         bprm->interp = bprm->filename;
1796 
1797         retval = bprm_mm_init(bprm);
1798         if (retval)
1799                 goto out_unmark;
1800 
1801         retval = prepare_arg_pages(bprm, argv, envp);
1802         if (retval < 0)
1803                 goto out;
1804 
1805         retval = prepare_binprm(bprm);
1806         if (retval < 0)
1807                 goto out;
1808 
1809         retval = copy_strings_kernel(1, &bprm->filename, bprm);
1810         if (retval < 0)
1811                 goto out;
1812 
1813         bprm->exec = bprm->p;
1814         retval = copy_strings(bprm->envc, envp, bprm);
1815         if (retval < 0)
1816                 goto out;
1817 
1818         retval = copy_strings(bprm->argc, argv, bprm);
1819         if (retval < 0)
1820                 goto out;
1821 
1822         retval = exec_binprm(bprm);
1823         if (retval < 0)
1824                 goto out;
1825 
1826         /* execve succeeded */
1827         current->fs->in_exec = 0;
1828         current->in_execve = 0;
1829         rseq_execve(current);
1830         acct_update_integrals(current);
1831         task_numa_free(current, false);
1832         free_bprm(bprm);
1833         kfree(pathbuf);
1834         if (filename)
1835                 putname(filename);
1836         if (displaced)
1837                 put_files_struct(displaced);
1838         return retval;
1839 
1840 out:
1841         if (bprm->mm) {
1842                 acct_arg_size(bprm, 0);
1843                 mmput(bprm->mm);
1844         }
1845 
1846 out_unmark:
1847         current->fs->in_exec = 0;
1848         current->in_execve = 0;
1849 
1850 out_free:
1851         free_bprm(bprm);
1852         kfree(pathbuf);
1853 
1854 out_files:
1855         if (displaced)
1856                 reset_files_struct(displaced);
1857 out_ret:
1858         if (filename)
1859                 putname(filename);
1860         return retval;
1861 }
1862 
1863 static int do_execveat_common(int fd, struct filename *filename,
1864                               struct user_arg_ptr argv,
1865                               struct user_arg_ptr envp,
1866                               int flags)
1867 {
1868         return __do_execve_file(fd, filename, argv, envp, flags, NULL);
1869 }
1870 
1871 int do_execve_file(struct file *file, void *__argv, void *__envp)
1872 {
1873         struct user_arg_ptr argv = { .ptr.native = __argv };
1874         struct user_arg_ptr envp = { .ptr.native = __envp };
1875 
1876         return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file);
1877 }
1878 
1879 int do_execve(struct filename *filename,
1880         const char __user *const __user *__argv,
1881         const char __user *const __user *__envp)
1882 {
1883         struct user_arg_ptr argv = { .ptr.native = __argv };
1884         struct user_arg_ptr envp = { .ptr.native = __envp };
1885         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1886 }
1887 
1888 int do_execveat(int fd, struct filename *filename,
1889                 const char __user *const __user *__argv,
1890                 const char __user *const __user *__envp,
1891                 int flags)
1892 {
1893         struct user_arg_ptr argv = { .ptr.native = __argv };
1894         struct user_arg_ptr envp = { .ptr.native = __envp };
1895 
1896         return do_execveat_common(fd, filename, argv, envp, flags);
1897 }
1898 
1899 #ifdef CONFIG_COMPAT
1900 static int compat_do_execve(struct filename *filename,
1901         const compat_uptr_t __user *__argv,
1902         const compat_uptr_t __user *__envp)
1903 {
1904         struct user_arg_ptr argv = {
1905                 .is_compat = true,
1906                 .ptr.compat = __argv,
1907         };
1908         struct user_arg_ptr envp = {
1909                 .is_compat = true,
1910                 .ptr.compat = __envp,
1911         };
1912         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1913 }
1914 
1915 static int compat_do_execveat(int fd, struct filename *filename,
1916                               const compat_uptr_t __user *__argv,
1917                               const compat_uptr_t __user *__envp,
1918                               int flags)
1919 {
1920         struct user_arg_ptr argv = {
1921                 .is_compat = true,
1922                 .ptr.compat = __argv,
1923         };
1924         struct user_arg_ptr envp = {
1925                 .is_compat = true,
1926                 .ptr.compat = __envp,
1927         };
1928         return do_execveat_common(fd, filename, argv, envp, flags);
1929 }
1930 #endif
1931 
1932 void set_binfmt(struct linux_binfmt *new)
1933 {
1934         struct mm_struct *mm = current->mm;
1935 
1936         if (mm->binfmt)
1937                 module_put(mm->binfmt->module);
1938 
1939         mm->binfmt = new;
1940         if (new)
1941                 __module_get(new->module);
1942 }
1943 EXPORT_SYMBOL(set_binfmt);
1944 
1945 /*
1946  * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
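 * SUID_DUMP_DISABLE (0) forbids core dumps, SUID_DUMP_USER (1) allows
 * dumps owned by the user, and SUID_DUMP_ROOT (2) produces dumps
 * readable only by root.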
1947  */
1948 void set_dumpable(struct mm_struct *mm, int value)
1949 {
1950         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1951                 return;
1952 
1953         set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
1954 }
1955 
1956 SYSCALL_DEFINE3(execve,
1957                 const char __user *, filename,
1958                 const char __user *const __user *, argv,
1959                 const char __user *const __user *, envp)
1960 {
1961         return do_execve(getname(filename), argv, envp);
1962 }
1963 
1964 SYSCALL_DEFINE5(execveat,
1965                 int, fd, const char __user *, filename,
1966                 const char __user *const __user *, argv,
1967                 const char __user *const __user *, envp,
1968                 int, flags)
1969 {
1970         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1971 
1972         return do_execveat(fd,
1973                            getname_flags(filename, lookup_flags, NULL),
1974                            argv, envp, flags);
1975 }
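
/*
 * Userspace view (sketch): execveat() with AT_EMPTY_PATH provides
 * fexecve(3)-style execution of an already-open file descriptor, e.g.:
 *
 *        int fd = open("/usr/bin/true", O_PATH | O_CLOEXEC);
 *        char *argv[] = { "true", NULL }, *envp[] = { NULL };
 *        syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 */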
1976 
1977 #ifdef CONFIG_COMPAT
1978 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1979         const compat_uptr_t __user *, argv,
1980         const compat_uptr_t __user *, envp)
1981 {
1982         return compat_do_execve(getname(filename), argv, envp);
1983 }
1984 
1985 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1986                        const char __user *, filename,
1987                        const compat_uptr_t __user *, argv,
1988                        const compat_uptr_t __user *, envp,
1989                        int,  flags)
1990 {
1991         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1992 
1993         return compat_do_execveat(fd,
1994                                   getname_flags(filename, lookup_flags, NULL),
1995                                   argv, envp, flags);
1996 }
1997 #endif
