#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}
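/*
 * A worked example of the grow-and-retry loop above (illustrative numbers
 * only): with cn->size == 8 and cn->used == 5 there are free = 3 bytes
 * left. Printing "%d" for pid 1234 makes vsnprintf() return need = 4 (the
 * length the output *would* have had), so need >= free and the buffer is
 * grown by need - free + 1 = 2 bytes (room for the output plus its NUL
 * terminator) before the vsnprintf() is retried on the copied va_list.
 */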
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
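/*
 * Example of the escaping above (path is made up): with a core_pattern
 * containing "%E", an executable at /usr/bin/prog is emitted by
 * cn_print_exe_file() as "!usr!bin!prog" -- cn_esc_printf() rewrites every
 * '/' produced by the expansion to '!', so expanded text can never smuggle
 * extra path components into the core file name.
 */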
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe)
		++pat_ptr;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
						task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
						task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
						task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
						task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
						    utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
						rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}
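/*
 * Illustrative core_pattern expansions (all values are made up):
 *
 *	core_pattern = "core.%e.%p"       -> "core.myprog.1234"
 *	core_pattern = "/var/dump/%h-%s"  -> "/var/dump/myhost-11"
 *	core_pattern = "|/bin/handler %p" -> ispipe, argv "/bin/handler 1234"
 *
 * A leading '|' selects the pipe path in do_coredump(); everything after
 * it is split into the usermode helper's argv by argv_split().
 */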
static int zap_process(struct task_struct *start, int exit_code, int flags)
{
	struct task_struct *t;
	int nr = 0;

	/* ignore all signals except SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_task = tsk;
		nr = zap_process(tsk, exit_code, 0);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;

		for_each_thread(g, p) {
			if (unlikely(!p->mm))
				continue;
			if (unlikely(p->mm == mm)) {
				lock_task_sighand(p, &flags);
				nr += zap_process(p, exit_code,
							SIGNAL_GROUP_EXIT);
				unlock_task_sighand(p, &flags);
			}
			break;
		}
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return signal_pending(current);
}

static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
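/*
 * Illustrative configuration of the pipe path (the handler path is an
 * example only):
 *
 *	echo '|/usr/local/bin/core-collector %p %s' > \
 *		/proc/sys/kernel/core_pattern
 *
 * On a crash, do_coredump() below runs the handler as a usermode helper
 * with the expanded arguments, and umh_pipe_setup() hands it the dump on
 * stdin (fd 0). RLIMIT_CORE is pinned to 1 inside the helper so a crash
 * of the helper itself cannot recurse into another pipe dump.
 */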
void do_coredump(const siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	struct files_struct *displaced;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			mm_segment_t old_fs;

			old_fs = get_fs();
			set_fs(KERNEL_DS);
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			(void) sys_unlink((const char __user *)cn.corename);
			set_fs(old_fs);
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(root.dentry, root.mnt,
				cn.corename, open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if ((inode->i_mode & 0677) != 0600)
			goto close_fail;
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	if (!dump_interrupted()) {
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
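/*
 * Note on core_pipe_limit (for illustration): when it is non-zero it bounds
 * how many crashing processes may be piped to usermode helpers in parallel
 * (checked against core_dump_count above), and do_coredump() then also
 * waits in wait_for_dump_helpers() until the helper closes the pipe. A
 * value of 0 means unlimited concurrent pipe dumps and no waiting, e.g.
 *
 *	echo 4 > /proc/sys/kernel/core_pipe_limit
 */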
/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;
	if (cprm->written + nr > cprm->limit)
		return 0;
	while (nr) {
		if (dump_interrupted())
			return 0;
		n = __kernel_write(file, addr, nr, &pos);
		if (n <= 0)
			return 0;
		file->f_pos = pos;
		cprm->written += n;
		nr -= n;
	}
	return 1;
}
EXPORT_SYMBOL(dump_emit);

int dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (cprm->written + nr > cprm->limit)
			return 0;
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->written += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return dump_emit(cprm, zeroes, nr);
	}
}
EXPORT_SYMBOL(dump_skip);

int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = cprm->written & (align - 1);
	if (align & (align - 1))
		return 0;
	return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
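/*
 * A minimal sketch (not a real binfmt) of how a ->core_dump() method is
 * expected to drive the helpers above; "example_hdr" is hypothetical:
 *
 *	static int example_core_dump(struct coredump_params *cprm)
 *	{
 *		struct example_hdr hdr = { 0 };
 *
 *		if (!dump_emit(cprm, &hdr, sizeof(hdr)))
 *			return 0;	// hit the limit or was interrupted
 *		if (!dump_align(cprm, 8))
 *			return 0;	// pad out to an 8-byte boundary
 *		if (!dump_skip(cprm, 16))
 *			return 0;	// a 16-byte hole: seek or zeroes
 *		return 1;		// dump written in full
 *	}
 *
 * For instance, dump_align(cprm, 8) with cprm->written == 13 computes
 * mod = 13 & 7 = 5 and skips 8 - 5 = 3 bytes; a non-power-of-two align
 * makes dump_align() return 0 (failure) by design.
 */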