This source file includes the following definitions:
- sig_handler
- sig_handler_ignored
- sig_task_ignored
- sig_ignored
- has_pending_signals
- recalc_sigpending_tsk
- recalc_sigpending_and_wake
- recalc_sigpending
- calculate_sigpending
- next_signal
- print_dropped_signal
- task_set_jobctl_pending
- task_clear_jobctl_trapping
- task_clear_jobctl_pending
- task_participate_group_stop
- task_join_group_stop
- __sigqueue_alloc
- __sigqueue_free
- flush_sigqueue
- flush_signals
- __flush_itimer_signals
- flush_itimer_signals
- ignore_signals
- flush_signal_handlers
- unhandled_signal
- collect_signal
- __dequeue_signal
- dequeue_signal
- dequeue_synchronous_signal
- signal_wake_up_state
- flush_sigqueue_mask
- is_si_special
- si_fromuser
- kill_ok_by_cred
- check_kill_permission
- ptrace_trap_notify
- prepare_signal
- wants_signal
- complete_signal
- legacy_queue
- __send_signal
- has_si_pid_and_uid
- send_signal
- print_fatal_signal
- setup_print_fatal_signals
- __group_send_sig_info
- do_send_sig_info
- force_sig_info_to_task
- force_sig_info
- zap_other_threads
- __lock_task_sighand
- group_send_sig_info
- __kill_pgrp_info
- kill_pid_info
- kill_proc_info
- kill_as_cred_perm
- kill_pid_usb_asyncio
- kill_something_info
- send_sig_info
- send_sig
- force_sig
- force_sigsegv
- force_sig_fault_to_task
- force_sig_fault
- send_sig_fault
- force_sig_mceerr
- send_sig_mceerr
- force_sig_bnderr
- force_sig_pkuerr
- force_sig_ptrace_errno_trap
- kill_pgrp
- kill_pid
- sigqueue_alloc
- sigqueue_free
- send_sigqueue
- do_notify_pidfd
- do_notify_parent
- do_notify_parent_cldstop
- may_ptrace_stop
- sigkill_pending
- ptrace_stop
- ptrace_do_notify
- ptrace_notify
- do_signal_stop
- do_jobctl_trap
- do_freezer_trap
- ptrace_signal
- get_signal
- signal_delivered
- signal_setup_done
- retarget_shared_pending
- exit_signals
- SYSCALL_DEFINE0
- do_no_restart_syscall
- __set_task_blocked
- set_current_blocked
- __set_current_blocked
- sigprocmask
- set_user_sigmask
- set_compat_user_sigmask
- SYSCALL_DEFINE4
- COMPAT_SYSCALL_DEFINE4
- do_sigpending
- SYSCALL_DEFINE2
- COMPAT_SYSCALL_DEFINE2
- known_siginfo_layout
- si_expansion
- copy_siginfo_to_user
- post_copy_siginfo_from_user
- __copy_siginfo_from_user
- copy_siginfo_from_user
- copy_siginfo_to_user32
- __copy_siginfo_to_user32
- post_copy_siginfo_from_user32
- __copy_siginfo_from_user32
- copy_siginfo_from_user32
- do_sigtimedwait
- SYSCALL_DEFINE4
- SYSCALL_DEFINE4
- COMPAT_SYSCALL_DEFINE4
- COMPAT_SYSCALL_DEFINE4
- prepare_kill_siginfo
- SYSCALL_DEFINE2
- access_pidfd_pidns
- copy_siginfo_from_user_any
- pidfd_to_pid
- SYSCALL_DEFINE4
- do_send_specific
- do_tkill
- SYSCALL_DEFINE3
- SYSCALL_DEFINE2
- do_rt_sigqueueinfo
- SYSCALL_DEFINE3
- COMPAT_SYSCALL_DEFINE3
- do_rt_tgsigqueueinfo
- SYSCALL_DEFINE4
- COMPAT_SYSCALL_DEFINE4
- kernel_sigaction
- sigaction_compat_abi
- do_sigaction
- do_sigaltstack
- SYSCALL_DEFINE2
- restore_altstack
- __save_altstack
- do_compat_sigaltstack
- COMPAT_SYSCALL_DEFINE2
- compat_restore_altstack
- __compat_save_altstack
- SYSCALL_DEFINE1
- COMPAT_SYSCALL_DEFINE1
- SYSCALL_DEFINE3
- SYSCALL_DEFINE4
- COMPAT_SYSCALL_DEFINE4
- SYSCALL_DEFINE3
- COMPAT_SYSCALL_DEFINE3
- SYSCALL_DEFINE0
- SYSCALL_DEFINE1
- SYSCALL_DEFINE2
- SYSCALL_DEFINE0
- sigsuspend
- SYSCALL_DEFINE2
- COMPAT_SYSCALL_DEFINE2
- SYSCALL_DEFINE1
- SYSCALL_DEFINE3
- arch_vma_name
- siginfo_buildtime_checks
- signals_init
- kdb_send_sig
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/kernel/signal.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  *
7  *  1997-11-02  Modified for POSIX.1 by Richard Henderson
8  *
9  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
10  *              Changes to use preallocated sigqueue structures
11  *              to make signal delivery fast.
12  */
13
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/livepatch.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
49
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
52
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
58
59 /*
60  * SLAB caches for signal bits.
61  */
62
63 static struct kmem_cache *sigqueue_cachep;
64
65 int print_fatal_signals __read_mostly;
66
67 static void __user *sig_handler(struct task_struct *t, int sig)
68 {
69 return t->sighand->action[sig - 1].sa.sa_handler;
70 }
71
72 static inline bool sig_handler_ignored(void __user *handler, int sig)
73 {
74 /* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
77 }
78
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80 {
81 void __user *handler;
82
83 handler = sig_handler(t, sig);
84
85 /* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 return true;
88
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true;
92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
98 return sig_handler_ignored(handler, sig);
99 }
100
101 static bool sig_ignored(struct task_struct *t, int sig, bool force)
102 {
103 /*
104  * Blocked signals are never ignored, since the
105  * signal handler may change by the time it is
106  * unblocked.
107  */
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 return false;
110
111 /*
112  * Tracers may want to know about even ignored signals, unless it
113  * is SIGKILL which can't be reported anyway but can be ignored
114  * by a SIGNAL_UNKILLABLE task.
115  */
116 if (t->ptrace && sig != SIGKILL)
117 return false;
118
119 return sig_task_ignored(t, sig, force);
120 }
121
122
123 /*
124  * Re-calculate pending state from the set of pending signals.
125  */
126 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127 {
128 unsigned long ready;
129 long i;
130
131 switch (_NSIG_WORDS) {
132 default:
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
135 break;
136
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
141 break;
142
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
145 break;
146
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
148 }
149 return ready != 0;
150 }
151
152 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
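/*
 * Editorial example (not in the original source): with SIGTERM and
 * SIGUSR1 pending and only SIGUSR1 blocked,
 * PENDING(&t->pending, &t->blocked) is true because SIGTERM survives
 * the word-wise "signal & ~blocked" test computed above.
 */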
153
154 static bool recalc_sigpending_tsk(struct task_struct *t)
155 {
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
161 return true;
162 }
163
164 /*
165  * We must never clear the flag in another thread, or in current
166  * when it's possible the current syscall is returning -ERESTART*.
167  * So we don't clear it here, and only callers who know they should do.
168  */
169 return false;
170 }
171
172
173 /*
174  * Re-calculate TIF_SIGPENDING for @t and wake it up if it becomes set.
175  */
176 void recalc_sigpending_and_wake(struct task_struct *t)
177 {
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
180 }
181
182 void recalc_sigpending(void)
183 {
184 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
185 !klp_patch_pending(current))
186 clear_thread_flag(TIF_SIGPENDING);
187
188 }
189 EXPORT_SYMBOL(recalc_sigpending);
190
191 void calculate_sigpending(void)
192 {
193 /* Have any signals or users of TIF_SIGPENDING been delayed
194  * until after fork?
195  */
196 spin_lock_irq(&current->sighand->siglock);
197 set_tsk_thread_flag(current, TIF_SIGPENDING);
198 recalc_sigpending();
199 spin_unlock_irq(&current->sighand->siglock);
200 }
201
202
203 /* Given the mask, find the first available signal that should be serviced. */
204 #define SYNCHRONOUS_MASK \
205 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
207
208 int next_signal(struct sigpending *pending, sigset_t *mask)
209 {
210 unsigned long i, *s, *m, x;
211 int sig = 0;
212
213 s = pending->signal.sig;
214 m = mask->sig;
215
216 /*
217  * Handle the first word specially: it contains the
218  * synchronous signals that need to be dequeued first.
219  */
220 x = *s &~ *m;
221 if (x) {
222 if (x & SYNCHRONOUS_MASK)
223 x &= SYNCHRONOUS_MASK;
224 sig = ffz(~x) + 1;
225 return sig;
226 }
227
228 switch (_NSIG_WORDS) {
229 default:
230 for (i = 1; i < _NSIG_WORDS; ++i) {
231 x = *++s &~ *++m;
232 if (!x)
233 continue;
234 sig = ffz(~x) + i*_NSIG_BPW + 1;
235 break;
236 }
237 break;
238
239 case 2:
240 x = s[1] &~ m[1];
241 if (!x)
242 break;
243 sig = ffz(~x) + _NSIG_BPW + 1;
244 break;
245
246 case 1:
247 /* Nothing to do */
248 break;
249 }
250
251 return sig;
252 }
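/*
 * Editorial example: if SIGUSR1 (10) and SIGSEGV (11) are both pending
 * in the first word, the SYNCHRONOUS_MASK filtering above makes
 * next_signal() return SIGSEGV first, even though SIGUSR1 has the lower
 * number; fault-generated signals are not starved by ordinary ones.
 */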
253
254 static inline void print_dropped_signal(int sig)
255 {
256 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
257
258 if (!print_fatal_signals)
259 return;
260
261 if (!__ratelimit(&ratelimit_state))
262 return;
263
264 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 current->comm, current->pid, sig);
266 }
267
268
269 /**
270  * task_set_jobctl_pending - set jobctl pending bits
271  * @task: target task
272  * @mask: pending bits to set
273  *
274  * Set @mask in @task->jobctl.  @mask must be a subset of
275  * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
276  * %JOBCTL_TRAPPING.  If @task is already being killed or exiting,
277  * this function becomes a no-op.
278  *
279  * CONTEXT:
280  * Must be called with @task->sighand->siglock held.
281  *
282  * RETURNS:
283  * %true if @mask is set, %false if made a no-op because @task was dying.
284  */
285 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
286 {
287 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
290
291 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
292 return false;
293
294 if (mask & JOBCTL_STOP_SIGMASK)
295 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
296
297 task->jobctl |= mask;
298 return true;
299 }
300
301 /**
302  * task_clear_jobctl_trapping - clear jobctl trapping bit
303  * @task: target task
304  *
305  * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306  * Clear it and wake up the ptracer.  Note that we don't need any further
307  * locking; @task->siglock guarantees that @task->parent points to the
308  * ptracer.
309  *
310  * CONTEXT:
311  * Must be called with @task->sighand->siglock held.
312  */
313 void task_clear_jobctl_trapping(struct task_struct *task)
314 {
315 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 task->jobctl &= ~JOBCTL_TRAPPING;
317 smp_mb();
318 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
319 }
320 }
321
322 /**
323  * task_clear_jobctl_pending - clear jobctl pending bits
324  * @task: target task
325  * @mask: pending bits to clear
326  *
327  * Clear @mask from @task->jobctl.  @mask must be a subset of
328  * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
329  * other STOP bits are cleared together.
330  *
331  * If clearing of @mask leaves no pending bits, %JOBCTL_TRAPPING is also
332  * cleared.
333  *
334  * CONTEXT:
335  * Must be called with @task->sighand->siglock held.
336  */
337 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
338 {
339 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
340
341 if (mask & JOBCTL_STOP_PENDING)
342 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
343
344 task->jobctl &= ~mask;
345
346 if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 task_clear_jobctl_trapping(task);
348 }
349
350 /**
351  * task_participate_group_stop - participate in a group stop
352  * @task: task participating in a group stop
353  *
354  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355  * Group stop states are cleared and the group stop count is consumed if
356  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
357  * stop, the appropriate %SIGNAL_* flags are set.
358  *
359  * CONTEXT:
360  * Must be called with @task->sighand->siglock held.
361  *
362  * RETURNS:
363  * %true if group stop completion should be notified to the parent, %false
364  * otherwise.
365  */
366 static bool task_participate_group_stop(struct task_struct *task)
367 {
368 struct signal_struct *sig = task->signal;
369 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
370
371 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
372
373 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
374
375 if (!consume)
376 return false;
377
378 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 sig->group_stop_count--;
380
381
382
383
384
385 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
387 return true;
388 }
389 return false;
390 }
391
392 void task_join_group_stop(struct task_struct *task)
393 {
394
395 unsigned long jobctl = current->jobctl;
396 if (jobctl & JOBCTL_STOP_PENDING) {
397 struct signal_struct *sig = current->signal;
398 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
399 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
400 if (task_set_jobctl_pending(task, signr | gstop)) {
401 sig->group_stop_count++;
402 }
403 }
404 }
405
406 /*
407  * allocate a new signal queue record
408  * - this may be called without locks if and only if t == current, otherwise an
409  *   appropriate lock must be held to stop the target task from exiting
410  */
411 static struct sigqueue *
412 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
413 {
414 struct sigqueue *q = NULL;
415 struct user_struct *user;
416 int sigpending;
417
418 /*
419  * Protect access to @t credentials. This can go away when all
420  * callers hold rcu read lock.
421  *
422  * NOTE! A pending signal will hold on to the user refcount,
423  * and we get/put the refcount only when the sigpending count
424  * goes from/to 0.
425  */
426 rcu_read_lock();
427 user = __task_cred(t)->user;
428 sigpending = atomic_inc_return(&user->sigpending);
429 if (sigpending == 1)
430 get_uid(user);
431 rcu_read_unlock();
432
433 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
434 q = kmem_cache_alloc(sigqueue_cachep, flags);
435 } else {
436 print_dropped_signal(sig);
437 }
438
439 if (unlikely(q == NULL)) {
440 if (atomic_dec_and_test(&user->sigpending))
441 free_uid(user);
442 } else {
443 INIT_LIST_HEAD(&q->list);
444 q->flags = 0;
445 q->user = user;
446 }
447
448 return q;
449 }
450
451 static void __sigqueue_free(struct sigqueue *q)
452 {
453 if (q->flags & SIGQUEUE_PREALLOC)
454 return;
455 if (atomic_dec_and_test(&q->user->sigpending))
456 free_uid(q->user);
457 kmem_cache_free(sigqueue_cachep, q);
458 }
459
460 void flush_sigqueue(struct sigpending *queue)
461 {
462 struct sigqueue *q;
463
464 sigemptyset(&queue->signal);
465 while (!list_empty(&queue->list)) {
466 q = list_entry(queue->list.next, struct sigqueue, list);
467 list_del_init(&q->list);
468 __sigqueue_free(q);
469 }
470 }
471
472 /*
473  * Flush all pending signals for this kthread.
474  */
475 void flush_signals(struct task_struct *t)
476 {
477 unsigned long flags;
478
479 spin_lock_irqsave(&t->sighand->siglock, flags);
480 clear_tsk_thread_flag(t, TIF_SIGPENDING);
481 flush_sigqueue(&t->pending);
482 flush_sigqueue(&t->signal->shared_pending);
483 spin_unlock_irqrestore(&t->sighand->siglock, flags);
484 }
485 EXPORT_SYMBOL(flush_signals);
486
487 #ifdef CONFIG_POSIX_TIMERS
488 static void __flush_itimer_signals(struct sigpending *pending)
489 {
490 sigset_t signal, retain;
491 struct sigqueue *q, *n;
492
493 signal = pending->signal;
494 sigemptyset(&retain);
495
496 list_for_each_entry_safe(q, n, &pending->list, list) {
497 int sig = q->info.si_signo;
498
499 if (likely(q->info.si_code != SI_TIMER)) {
500 sigaddset(&retain, sig);
501 } else {
502 sigdelset(&signal, sig);
503 list_del_init(&q->list);
504 __sigqueue_free(q);
505 }
506 }
507
508 sigorsets(&pending->signal, &signal, &retain);
509 }
510
511 void flush_itimer_signals(void)
512 {
513 struct task_struct *tsk = current;
514 unsigned long flags;
515
516 spin_lock_irqsave(&tsk->sighand->siglock, flags);
517 __flush_itimer_signals(&tsk->pending);
518 __flush_itimer_signals(&tsk->signal->shared_pending);
519 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
520 }
521 #endif
522
523 void ignore_signals(struct task_struct *t)
524 {
525 int i;
526
527 for (i = 0; i < _NSIG; ++i)
528 t->sighand->action[i].sa.sa_handler = SIG_IGN;
529
530 flush_signals(t);
531 }
532
533
534 /*
535  * Flush all handlers for a task.
536  */
537 void
538 flush_signal_handlers(struct task_struct *t, int force_default)
539 {
540 int i;
541 struct k_sigaction *ka = &t->sighand->action[0];
542 for (i = _NSIG ; i != 0 ; i--) {
543 if (force_default || ka->sa.sa_handler != SIG_IGN)
544 ka->sa.sa_handler = SIG_DFL;
545 ka->sa.sa_flags = 0;
546 #ifdef __ARCH_HAS_SA_RESTORER
547 ka->sa.sa_restorer = NULL;
548 #endif
549 sigemptyset(&ka->sa.sa_mask);
550 ka++;
551 }
552 }
553
554 bool unhandled_signal(struct task_struct *tsk, int sig)
555 {
556 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
557 if (is_global_init(tsk))
558 return true;
559
560 if (handler != SIG_IGN && handler != SIG_DFL)
561 return false;
562
563
564 return !tsk->ptrace;
565 }
566
567 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
568 bool *resched_timer)
569 {
570 struct sigqueue *q, *first = NULL;
571
572 /*
573  * Collect the siginfo appropriate to this signal.  Check if
574  * there is another siginfo for the same signal.
575  */
576 list_for_each_entry(q, &list->list, list) {
577 if (q->info.si_signo == sig) {
578 if (first)
579 goto still_pending;
580 first = q;
581 }
582 }
583
584 sigdelset(&list->signal, sig);
585
586 if (first) {
587 still_pending:
588 list_del_init(&first->list);
589 copy_siginfo(info, &first->info);
590
591 *resched_timer =
592 (first->flags & SIGQUEUE_PREALLOC) &&
593 (info->si_code == SI_TIMER) &&
594 (info->si_sys_private);
595
596 __sigqueue_free(first);
597 } else {
598 /*
599  * Ok, it wasn't in the queue.  This must be
600  * a fast-pathed signal or we must have been
601  * out of queue space.  So zero out the info.
602  */
603 clear_siginfo(info);
604 info->si_signo = sig;
605 info->si_errno = 0;
606 info->si_code = SI_USER;
607 info->si_pid = 0;
608 info->si_uid = 0;
609 }
610 }
611
612 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
613 kernel_siginfo_t *info, bool *resched_timer)
614 {
615 int sig = next_signal(pending, mask);
616
617 if (sig)
618 collect_signal(sig, pending, info, resched_timer);
619 return sig;
620 }
621
622 /*
623  * Dequeue a signal and return the element to the caller, which is
624  * expected to free it.
625  *
626  * All callers have to hold the siglock.
627  */
628 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
629 {
630 bool resched_timer = false;
631 int signr;
632
633
634
635
636 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
637 if (!signr) {
638 signr = __dequeue_signal(&tsk->signal->shared_pending,
639 mask, info, &resched_timer);
640 #ifdef CONFIG_POSIX_TIMERS
641 /*
642  * itimer signal?
643  *
644  * itimers are process shared and we restart periodic
645  * itimers in the signal delivery path to prevent DoS
646  * attacks in the high resolution timer case. This is
647  * compliant with the old way of self-restarting
648  * itimers, as the SIGALRM is a legacy signal and only
649  * queued once. Changing the restart behaviour to
650  * restart the timer in the signal dequeue path is
651  * required to avoid DoS attacks in the case of a
652  * SIG_IGN like behaviour.
653  */
654 if (unlikely(signr == SIGALRM)) {
655 struct hrtimer *tmr = &tsk->signal->real_timer;
656
657 if (!hrtimer_is_queued(tmr) &&
658 tsk->signal->it_real_incr != 0) {
659 hrtimer_forward(tmr, tmr->base->get_time(),
660 tsk->signal->it_real_incr);
661 hrtimer_restart(tmr);
662 }
663 }
664 #endif
665 }
666
667 recalc_sigpending();
668 if (!signr)
669 return 0;
670
671 if (unlikely(sig_kernel_stop(signr))) {
672
673
674
675
676
677
678
679
680
681
682
683
684 current->jobctl |= JOBCTL_STOP_DEQUEUED;
685 }
686 #ifdef CONFIG_POSIX_TIMERS
687 if (resched_timer) {
688
689
690
691
692
693
694 spin_unlock(&tsk->sighand->siglock);
695 posixtimer_rearm(info);
696 spin_lock(&tsk->sighand->siglock);
697
698
699 info->si_sys_private = 0;
700 }
701 #endif
702 return signr;
703 }
704 EXPORT_SYMBOL_GPL(dequeue_signal);
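/*
 * Usage sketch (illustrative, not from this file): callers such as
 * do_sigtimedwait() dequeue with the siglock held, passing the set of
 * signals they are *not* interested in as @mask:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &mask, &info);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */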
705
706 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
707 {
708 struct task_struct *tsk = current;
709 struct sigpending *pending = &tsk->pending;
710 struct sigqueue *q, *sync = NULL;
711
712
713
714
715 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
716 return 0;
717
718
719
720
721 list_for_each_entry(q, &pending->list, list) {
722
723 if ((q->info.si_code > SI_USER) &&
724 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
725 sync = q;
726 goto next;
727 }
728 }
729 return 0;
730 next:
731
732
733
734 list_for_each_entry_continue(q, &pending->list, list) {
735 if (q->info.si_signo == sync->info.si_signo)
736 goto still_pending;
737 }
738
739 sigdelset(&pending->signal, sync->info.si_signo);
740 recalc_sigpending();
741 still_pending:
742 list_del_init(&sync->list);
743 copy_siginfo(info, &sync->info);
744 __sigqueue_free(sync);
745 return info->si_signo;
746 }
747
748 /*
749  * Tell a process that it has a new active signal.
750  *
751  * NOTE! we rely on the previous spin_lock to
752  * lock interrupts for us! We can only be called with
753  * "siglock" held, and the local interrupt must
754  * have been disabled when that got acquired!
755  *
756  * No need to set need_resched since signal event passing
757  * goes through ->blocked
758  */
759 void signal_wake_up_state(struct task_struct *t, unsigned int state)
760 {
761 set_tsk_thread_flag(t, TIF_SIGPENDING);
762
763
764
765
766
767
768
769 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
770 kick_process(t);
771 }
772
773 /*
774  * Remove signals in mask from the pending set and queue.
775  *
776  * All callers must be holding the siglock.
777  */
778
779 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
780 {
781 struct sigqueue *q, *n;
782 sigset_t m;
783
784 sigandsets(&m, mask, &s->signal);
785 if (sigisemptyset(&m))
786 return;
787
788 sigandnsets(&s->signal, &s->signal, mask);
789 list_for_each_entry_safe(q, n, &s->list, list) {
790 if (sigismember(mask, q->info.si_signo)) {
791 list_del_init(&q->list);
792 __sigqueue_free(q);
793 }
794 }
795 }
796
797 static inline int is_si_special(const struct kernel_siginfo *info)
798 {
799 return info <= SEND_SIG_PRIV;
800 }
801
802 static inline bool si_fromuser(const struct kernel_siginfo *info)
803 {
804 return info == SEND_SIG_NOINFO ||
805 (!is_si_special(info) && SI_FROMUSER(info));
806 }
807
808 /*
809  * called with RCU read lock from check_kill_permission()
810  */
811 static bool kill_ok_by_cred(struct task_struct *t)
812 {
813 const struct cred *cred = current_cred();
814 const struct cred *tcred = __task_cred(t);
815
816 return uid_eq(cred->euid, tcred->suid) ||
817 uid_eq(cred->euid, tcred->uid) ||
818 uid_eq(cred->uid, tcred->suid) ||
819 uid_eq(cred->uid, tcred->uid) ||
820 ns_capable(tcred->user_ns, CAP_KILL);
821 }
822
823 /*
824  * Bad permissions for sending the signal
825  * - the caller must hold the RCU read lock
826  */
827 static int check_kill_permission(int sig, struct kernel_siginfo *info,
828 struct task_struct *t)
829 {
830 struct pid *sid;
831 int error;
832
833 if (!valid_signal(sig))
834 return -EINVAL;
835
836 if (!si_fromuser(info))
837 return 0;
838
839 error = audit_signal_info(sig, t);
840 if (error)
841 return error;
842
843 if (!same_thread_group(current, t) &&
844 !kill_ok_by_cred(t)) {
845 switch (sig) {
846 case SIGCONT:
847 sid = task_session(t);
848
849
850
851
852 if (!sid || sid == task_session(current))
853 break;
854
855 default:
856 return -EPERM;
857 }
858 }
859
860 return security_task_kill(t, info, sig, NULL);
861 }
862
863 /**
864  * ptrace_trap_notify - schedule trap to notify ptracer
865  * @t: tracee wanting to notify tracer
866  *
867  * This function schedules a sticky ptrace trap which is cleared on the
868  * next TRAP_STOP to notify the ptracer of an event.  @t must have been
869  * seized by the ptracer.
870  *
871  * If @t is running, a STOP trap will be taken.  If trapped for STOP and
872  * the ptracer is listening for events, the tracee is woken up so that it
873  * can re-trap for the new event.  If trapped otherwise, the STOP trap
874  * will eventually be taken without returning to userland after the
875  * existing traps are finished by PTRACE_CONT.
876  *
877  * CONTEXT:
878  * Must be called with @t->sighand->siglock held.
879  */
880 static void ptrace_trap_notify(struct task_struct *t)
881 {
882 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
883 assert_spin_locked(&t->sighand->siglock);
884
885 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
886 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
887 }
888
889 /*
890  * Handle magic process-wide effects of stop/continue signals. Unlike
891  * the signal actions, these happen immediately at signal-generation
892  * time regardless of blocking, ignoring, or handling.  This is the
893  * actual signal that is delivered; a stop signal flushes any pending
894  * SIGCONT and vice versa.
895  *
896  * Returns true if the signal should be actually delivered, otherwise
897  * it should be dropped.
898  */
899 static bool prepare_signal(int sig, struct task_struct *p, bool force)
900 {
901 struct signal_struct *signal = p->signal;
902 struct task_struct *t;
903 sigset_t flush;
904
905 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
906 if (!(signal->flags & SIGNAL_GROUP_EXIT))
907 return sig == SIGKILL;
908
909
910
911 } else if (sig_kernel_stop(sig)) {
912
913
914
915 siginitset(&flush, sigmask(SIGCONT));
916 flush_sigqueue_mask(&flush, &signal->shared_pending);
917 for_each_thread(p, t)
918 flush_sigqueue_mask(&flush, &t->pending);
919 } else if (sig == SIGCONT) {
920 unsigned int why;
921
922
923
924 siginitset(&flush, SIG_KERNEL_STOP_MASK);
925 flush_sigqueue_mask(&flush, &signal->shared_pending);
926 for_each_thread(p, t) {
927 flush_sigqueue_mask(&flush, &t->pending);
928 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
929 if (likely(!(t->ptrace & PT_SEIZED)))
930 wake_up_state(t, __TASK_STOPPED);
931 else
932 ptrace_trap_notify(t);
933 }
934
935
936
937
938
939
940
941
942
943 why = 0;
944 if (signal->flags & SIGNAL_STOP_STOPPED)
945 why |= SIGNAL_CLD_CONTINUED;
946 else if (signal->group_stop_count)
947 why |= SIGNAL_CLD_STOPPED;
948
949 if (why) {
950
951
952
953
954
955 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
956 signal->group_stop_count = 0;
957 signal->group_exit_code = 0;
958 }
959 }
960
961 return !sig_ignored(p, sig, force);
962 }
963
964 /*
965  * Test if P wants to take SIG.  After we've checked all threads with this,
966  * it's equivalent to finding no threads not blocking SIG.  Any threads not
967  * blocking SIG were ruled out because they are not running and already
968  * have pending signals.  Such threads will dequeue from the shared queue
969  * as soon as they're available, so putting the signal on the shared queue
970  * will be equivalent to sending it to one such thread.
971  */
972 static inline bool wants_signal(int sig, struct task_struct *p)
973 {
974 if (sigismember(&p->blocked, sig))
975 return false;
976
977 if (p->flags & PF_EXITING)
978 return false;
979
980 if (sig == SIGKILL)
981 return true;
982
983 if (task_is_stopped_or_traced(p))
984 return false;
985
986 return task_curr(p) || !signal_pending(p);
987 }
988
989 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
990 {
991 struct signal_struct *signal = p->signal;
992 struct task_struct *t;
993
994
995
996
997
998
999
1000 if (wants_signal(sig, p))
1001 t = p;
1002 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1003
1004
1005
1006
1007 return;
1008 else {
1009
1010
1011
1012 t = signal->curr_target;
1013 while (!wants_signal(sig, t)) {
1014 t = next_thread(t);
1015 if (t == signal->curr_target)
1016
1017
1018
1019
1020
1021 return;
1022 }
1023 signal->curr_target = t;
1024 }
1025
1026
1027
1028
1029
1030 if (sig_fatal(p, sig) &&
1031 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1032 !sigismember(&t->real_blocked, sig) &&
1033 (sig == SIGKILL || !p->ptrace)) {
1034
1035
1036
1037 if (!sig_kernel_coredump(sig)) {
1038
1039
1040
1041
1042
1043
1044 signal->flags = SIGNAL_GROUP_EXIT;
1045 signal->group_exit_code = sig;
1046 signal->group_stop_count = 0;
1047 t = p;
1048 do {
1049 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1050 sigaddset(&t->pending.signal, SIGKILL);
1051 signal_wake_up(t, 1);
1052 } while_each_thread(p, t);
1053 return;
1054 }
1055 }
1056
1057
1058
1059
1060
1061 signal_wake_up(t, sig == SIGKILL);
1062 return;
1063 }
1064
1065 static inline bool legacy_queue(struct sigpending *signals, int sig)
1066 {
1067 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1068 }
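/*
 * Editorial example: legacy (< SIGRTMIN) signals coalesce.  If SIGCHLD
 * is already pending, a second SIGCHLD is dropped by the check above,
 * whereas an rt signal is queued again with its own siginfo.
 */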
1069
1070 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1071 enum pid_type type, bool force)
1072 {
1073 struct sigpending *pending;
1074 struct sigqueue *q;
1075 int override_rlimit;
1076 int ret = 0, result;
1077
1078 assert_spin_locked(&t->sighand->siglock);
1079
1080 result = TRACE_SIGNAL_IGNORED;
1081 if (!prepare_signal(sig, t, force))
1082 goto ret;
1083
1084 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1085
1086
1087
1088
1089
1090 result = TRACE_SIGNAL_ALREADY_PENDING;
1091 if (legacy_queue(pending, sig))
1092 goto ret;
1093
1094 result = TRACE_SIGNAL_DELIVERED;
1095
1096
1097
1098 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1099 goto out_set;
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110 if (sig < SIGRTMIN)
1111 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1112 else
1113 override_rlimit = 0;
1114
1115 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1116 if (q) {
1117 list_add_tail(&q->list, &pending->list);
1118 switch ((unsigned long) info) {
1119 case (unsigned long) SEND_SIG_NOINFO:
1120 clear_siginfo(&q->info);
1121 q->info.si_signo = sig;
1122 q->info.si_errno = 0;
1123 q->info.si_code = SI_USER;
1124 q->info.si_pid = task_tgid_nr_ns(current,
1125 task_active_pid_ns(t));
1126 rcu_read_lock();
1127 q->info.si_uid =
1128 from_kuid_munged(task_cred_xxx(t, user_ns),
1129 current_uid());
1130 rcu_read_unlock();
1131 break;
1132 case (unsigned long) SEND_SIG_PRIV:
1133 clear_siginfo(&q->info);
1134 q->info.si_signo = sig;
1135 q->info.si_errno = 0;
1136 q->info.si_code = SI_KERNEL;
1137 q->info.si_pid = 0;
1138 q->info.si_uid = 0;
1139 break;
1140 default:
1141 copy_siginfo(&q->info, info);
1142 break;
1143 }
1144 } else if (!is_si_special(info) &&
1145 sig >= SIGRTMIN && info->si_code != SI_USER) {
1146
1147
1148
1149
1150
1151 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1152 ret = -EAGAIN;
1153 goto ret;
1154 } else {
1155
1156
1157
1158
1159 result = TRACE_SIGNAL_LOSE_INFO;
1160 }
1161
1162 out_set:
1163 signalfd_notify(t, sig);
1164 sigaddset(&pending->signal, sig);
1165
1166
1167 if (type > PIDTYPE_TGID) {
1168 struct multiprocess_signals *delayed;
1169 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1170 sigset_t *signal = &delayed->signal;
1171
1172 if (sig == SIGCONT)
1173 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1174 else if (sig_kernel_stop(sig))
1175 sigdelset(signal, SIGCONT);
1176 sigaddset(signal, sig);
1177 }
1178 }
1179
1180 complete_signal(sig, t, type);
1181 ret:
1182 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1183 return ret;
1184 }
1185
1186 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1187 {
1188 bool ret = false;
1189 switch (siginfo_layout(info->si_signo, info->si_code)) {
1190 case SIL_KILL:
1191 case SIL_CHLD:
1192 case SIL_RT:
1193 ret = true;
1194 break;
1195 case SIL_TIMER:
1196 case SIL_POLL:
1197 case SIL_FAULT:
1198 case SIL_FAULT_MCEERR:
1199 case SIL_FAULT_BNDERR:
1200 case SIL_FAULT_PKUERR:
1201 case SIL_SYS:
1202 ret = false;
1203 break;
1204 }
1205 return ret;
1206 }
1207
1208 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1209 enum pid_type type)
1210 {
1211
1212 bool force = false;
1213
1214 if (info == SEND_SIG_NOINFO) {
1215
1216 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1217 } else if (info == SEND_SIG_PRIV) {
1218
1219 force = true;
1220 } else if (has_si_pid_and_uid(info)) {
1221
1222 struct user_namespace *t_user_ns;
1223
1224 rcu_read_lock();
1225 t_user_ns = task_cred_xxx(t, user_ns);
1226 if (current_user_ns() != t_user_ns) {
1227 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1228 info->si_uid = from_kuid_munged(t_user_ns, uid);
1229 }
1230 rcu_read_unlock();
1231
1232
1233 force = (info->si_code == SI_KERNEL);
1234
1235
1236 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1237 info->si_pid = 0;
1238 force = true;
1239 }
1240 }
1241 return __send_signal(sig, info, t, type, force);
1242 }
1243
1244 static void print_fatal_signal(int signr)
1245 {
1246 struct pt_regs *regs = signal_pt_regs();
1247 pr_info("potentially unexpected fatal signal %d.\n", signr);
1248
1249 #if defined(__i386__) && !defined(__arch_um__)
1250 pr_info("code at %08lx: ", regs->ip);
1251 {
1252 int i;
1253 for (i = 0; i < 16; i++) {
1254 unsigned char insn;
1255
1256 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1257 break;
1258 pr_cont("%02x ", insn);
1259 }
1260 }
1261 pr_cont("\n");
1262 #endif
1263 preempt_disable();
1264 show_regs(regs);
1265 preempt_enable();
1266 }
1267
1268 static int __init setup_print_fatal_signals(char *str)
1269 {
1270 get_option (&str, &print_fatal_signals);
1271
1272 return 1;
1273 }
1274
1275 __setup("print-fatal-signals=", setup_print_fatal_signals);
1276
1277 int
1278 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1279 {
1280 return send_signal(sig, info, p, PIDTYPE_TGID);
1281 }
1282
1283 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1284 enum pid_type type)
1285 {
1286 unsigned long flags;
1287 int ret = -ESRCH;
1288
1289 if (lock_task_sighand(p, &flags)) {
1290 ret = send_signal(sig, info, p, type);
1291 unlock_task_sighand(p, &flags);
1292 }
1293
1294 return ret;
1295 }
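/*
 * Usage sketch (illustrative): most in-kernel senders go through this
 * locking wrapper, e.g.
 *
 *	do_send_sig_info(SIGTERM, SEND_SIG_PRIV, p, PIDTYPE_TGID);
 *
 * for a kernel-generated, process-directed SIGTERM to @p.
 */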
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308 static int
1309 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1310 {
1311 unsigned long int flags;
1312 int ret, blocked, ignored;
1313 struct k_sigaction *action;
1314 int sig = info->si_signo;
1315
1316 spin_lock_irqsave(&t->sighand->siglock, flags);
1317 action = &t->sighand->action[sig-1];
1318 ignored = action->sa.sa_handler == SIG_IGN;
1319 blocked = sigismember(&t->blocked, sig);
1320 if (blocked || ignored) {
1321 action->sa.sa_handler = SIG_DFL;
1322 if (blocked) {
1323 sigdelset(&t->blocked, sig);
1324 recalc_sigpending_and_wake(t);
1325 }
1326 }
1327
1328
1329
1330
1331 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1332 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1333 ret = send_signal(sig, info, t, PIDTYPE_PID);
1334 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1335
1336 return ret;
1337 }
1338
1339 int force_sig_info(struct kernel_siginfo *info)
1340 {
1341 return force_sig_info_to_task(info, current);
1342 }
1343
1344
1345
1346
1347 int zap_other_threads(struct task_struct *p)
1348 {
1349 struct task_struct *t = p;
1350 int count = 0;
1351
1352 p->signal->group_stop_count = 0;
1353
1354 while_each_thread(p, t) {
1355 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1356 count++;
1357
1358
1359 if (t->exit_state)
1360 continue;
1361 sigaddset(&t->pending.signal, SIGKILL);
1362 signal_wake_up(t, 1);
1363 }
1364
1365 return count;
1366 }
1367
1368 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1369 unsigned long *flags)
1370 {
1371 struct sighand_struct *sighand;
1372
1373 rcu_read_lock();
1374 for (;;) {
1375 sighand = rcu_dereference(tsk->sighand);
1376 if (unlikely(sighand == NULL))
1377 break;
1378
1379 /*
1380  * This sighand can be already freed and even reused, but
1381  * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1382  * initializes ->siglock: this slab can't go away, it has
1383  * the same object type, ->siglock can't be reinitialized.
1384  *
1385  * We need to ensure that tsk->sighand is still the same
1386  * after we take the lock, we can race with de_thread() or
1387  * __exit_signal(). In the latter case the next iteration
1388  * must see ->sighand == NULL.
1389  */
1390 spin_lock_irqsave(&sighand->siglock, *flags);
1391 if (likely(sighand == tsk->sighand))
1392 break;
1393 spin_unlock_irqrestore(&sighand->siglock, *flags);
1394 }
1395 rcu_read_unlock();
1396
1397 return sighand;
1398 }
1399
1400
1401
1402
1403 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1404 struct task_struct *p, enum pid_type type)
1405 {
1406 int ret;
1407
1408 rcu_read_lock();
1409 ret = check_kill_permission(sig, info, p);
1410 rcu_read_unlock();
1411
1412 if (!ret && sig)
1413 ret = do_send_sig_info(sig, info, p, type);
1414
1415 return ret;
1416 }
1417
1418
1419
1420
1421
1422
1423 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1424 {
1425 struct task_struct *p = NULL;
1426 int retval, success;
1427
1428 success = 0;
1429 retval = -ESRCH;
1430 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1431 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1432 success |= !err;
1433 retval = err;
1434 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1435 return success ? 0 : retval;
1436 }
1437
1438 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1439 {
1440 int error = -ESRCH;
1441 struct task_struct *p;
1442
1443 for (;;) {
1444 rcu_read_lock();
1445 p = pid_task(pid, PIDTYPE_PID);
1446 if (p)
1447 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1448 rcu_read_unlock();
1449 if (likely(!p || error != -ESRCH))
1450 return error;
1451
1452
1453
1454
1455
1456
1457 }
1458 }
1459
1460 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1461 {
1462 int error;
1463 rcu_read_lock();
1464 error = kill_pid_info(sig, info, find_vpid(pid));
1465 rcu_read_unlock();
1466 return error;
1467 }
1468
1469 static inline bool kill_as_cred_perm(const struct cred *cred,
1470 struct task_struct *target)
1471 {
1472 const struct cred *pcred = __task_cred(target);
1473
1474 return uid_eq(cred->euid, pcred->suid) ||
1475 uid_eq(cred->euid, pcred->uid) ||
1476 uid_eq(cred->uid, pcred->suid) ||
1477 uid_eq(cred->uid, pcred->uid);
1478 }
1479
1480
1481 /*
1482  * The usb asyncio usage of siginfo is wrong.  The glibc support for
1483  * asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT, so the
1484  * sigval payload lands in the si_pid/si_uid slots.  This helper
1485  * therefore builds the siginfo by hand and sends it with
1486  * __send_signal() directly, bypassing the usual si_pid/si_uid fixups.
1487  */
1505 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1506 struct pid *pid, const struct cred *cred)
1507 {
1508 struct kernel_siginfo info;
1509 struct task_struct *p;
1510 unsigned long flags;
1511 int ret = -EINVAL;
1512
1513 if (!valid_signal(sig))
1514 return ret;
1515
1516 clear_siginfo(&info);
1517 info.si_signo = sig;
1518 info.si_errno = errno;
1519 info.si_code = SI_ASYNCIO;
1520 *((sigval_t *)&info.si_pid) = addr;
1521
1522 rcu_read_lock();
1523 p = pid_task(pid, PIDTYPE_PID);
1524 if (!p) {
1525 ret = -ESRCH;
1526 goto out_unlock;
1527 }
1528 if (!kill_as_cred_perm(cred, p)) {
1529 ret = -EPERM;
1530 goto out_unlock;
1531 }
1532 ret = security_task_kill(p, &info, sig, cred);
1533 if (ret)
1534 goto out_unlock;
1535
1536 if (sig) {
1537 if (lock_task_sighand(p, &flags)) {
1538 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1539 unlock_task_sighand(p, &flags);
1540 } else
1541 ret = -ESRCH;
1542 }
1543 out_unlock:
1544 rcu_read_unlock();
1545 return ret;
1546 }
1547 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1548
1549
1550
1551
1552
1553
1554
1555
1556 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1557 {
1558 int ret;
1559
1560 if (pid > 0) {
1561 rcu_read_lock();
1562 ret = kill_pid_info(sig, info, find_vpid(pid));
1563 rcu_read_unlock();
1564 return ret;
1565 }
1566
1567
1568 if (pid == INT_MIN)
1569 return -ESRCH;
1570
1571 read_lock(&tasklist_lock);
1572 if (pid != -1) {
1573 ret = __kill_pgrp_info(sig, info,
1574 pid ? find_vpid(-pid) : task_pgrp(current));
1575 } else {
1576 int retval = 0, count = 0;
1577 struct task_struct * p;
1578
1579 for_each_process(p) {
1580 if (task_pid_vnr(p) > 1 &&
1581 !same_thread_group(p, current)) {
1582 int err = group_send_sig_info(sig, info, p,
1583 PIDTYPE_MAX);
1584 ++count;
1585 if (err != -EPERM)
1586 retval = err;
1587 }
1588 }
1589 ret = count ? retval : -ESRCH;
1590 }
1591 read_unlock(&tasklist_lock);
1592
1593 return ret;
1594 }
1595
1596
1597
1598
1599
1600 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1601 {
1602
1603
1604
1605
1606 if (!valid_signal(sig))
1607 return -EINVAL;
1608
1609 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1610 }
1611 EXPORT_SYMBOL(send_sig_info);
1612
1613 #define __si_special(priv) \
1614 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1615
1616 int
1617 send_sig(int sig, struct task_struct *p, int priv)
1618 {
1619 return send_sig_info(sig, __si_special(priv), p);
1620 }
1621 EXPORT_SYMBOL(send_sig);
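/*
 * Usage sketch (illustrative):
 *
 *	send_sig(SIGKILL, p, 1);
 *
 * priv=1 sends SEND_SIG_PRIV (marks the signal as kernel-generated);
 * priv=0 sends SEND_SIG_NOINFO.
 */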
1622
1623 void force_sig(int sig)
1624 {
1625 struct kernel_siginfo info;
1626
1627 clear_siginfo(&info);
1628 info.si_signo = sig;
1629 info.si_errno = 0;
1630 info.si_code = SI_KERNEL;
1631 info.si_pid = 0;
1632 info.si_uid = 0;
1633 force_sig_info(&info);
1634 }
1635 EXPORT_SYMBOL(force_sig);
1636
1637
1638
1639
1640
1641
1642
1643 void force_sigsegv(int sig)
1644 {
1645 struct task_struct *p = current;
1646
1647 if (sig == SIGSEGV) {
1648 unsigned long flags;
1649 spin_lock_irqsave(&p->sighand->siglock, flags);
1650 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1651 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1652 }
1653 force_sig(SIGSEGV);
1654 }
1655
1656 int force_sig_fault_to_task(int sig, int code, void __user *addr
1657 ___ARCH_SI_TRAPNO(int trapno)
1658 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1659 , struct task_struct *t)
1660 {
1661 struct kernel_siginfo info;
1662
1663 clear_siginfo(&info);
1664 info.si_signo = sig;
1665 info.si_errno = 0;
1666 info.si_code = code;
1667 info.si_addr = addr;
1668 #ifdef __ARCH_SI_TRAPNO
1669 info.si_trapno = trapno;
1670 #endif
1671 #ifdef __ia64__
1672 info.si_imm = imm;
1673 info.si_flags = flags;
1674 info.si_isr = isr;
1675 #endif
1676 return force_sig_info_to_task(&info, t);
1677 }
1678
1679 int force_sig_fault(int sig, int code, void __user *addr
1680 ___ARCH_SI_TRAPNO(int trapno)
1681 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1682 {
1683 return force_sig_fault_to_task(sig, code, addr
1684 ___ARCH_SI_TRAPNO(trapno)
1685 ___ARCH_SI_IA64(imm, flags, isr), current);
1686 }
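/*
 * Usage sketch (illustrative): an architecture fault handler typically
 * reports a bad user access with
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
 *
 * plus the ___ARCH_SI_TRAPNO/___ARCH_SI_IA64 arguments on architectures
 * that define them.
 */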
1687
1688 int send_sig_fault(int sig, int code, void __user *addr
1689 ___ARCH_SI_TRAPNO(int trapno)
1690 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1691 , struct task_struct *t)
1692 {
1693 struct kernel_siginfo info;
1694
1695 clear_siginfo(&info);
1696 info.si_signo = sig;
1697 info.si_errno = 0;
1698 info.si_code = code;
1699 info.si_addr = addr;
1700 #ifdef __ARCH_SI_TRAPNO
1701 info.si_trapno = trapno;
1702 #endif
1703 #ifdef __ia64__
1704 info.si_imm = imm;
1705 info.si_flags = flags;
1706 info.si_isr = isr;
1707 #endif
1708 return send_sig_info(info.si_signo, &info, t);
1709 }
1710
1711 int force_sig_mceerr(int code, void __user *addr, short lsb)
1712 {
1713 struct kernel_siginfo info;
1714
1715 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1716 clear_siginfo(&info);
1717 info.si_signo = SIGBUS;
1718 info.si_errno = 0;
1719 info.si_code = code;
1720 info.si_addr = addr;
1721 info.si_addr_lsb = lsb;
1722 return force_sig_info(&info);
1723 }
1724
1725 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1726 {
1727 struct kernel_siginfo info;
1728
1729 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1730 clear_siginfo(&info);
1731 info.si_signo = SIGBUS;
1732 info.si_errno = 0;
1733 info.si_code = code;
1734 info.si_addr = addr;
1735 info.si_addr_lsb = lsb;
1736 return send_sig_info(info.si_signo, &info, t);
1737 }
1738 EXPORT_SYMBOL(send_sig_mceerr);
1739
1740 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1741 {
1742 struct kernel_siginfo info;
1743
1744 clear_siginfo(&info);
1745 info.si_signo = SIGSEGV;
1746 info.si_errno = 0;
1747 info.si_code = SEGV_BNDERR;
1748 info.si_addr = addr;
1749 info.si_lower = lower;
1750 info.si_upper = upper;
1751 return force_sig_info(&info);
1752 }
1753
1754 #ifdef SEGV_PKUERR
1755 int force_sig_pkuerr(void __user *addr, u32 pkey)
1756 {
1757 struct kernel_siginfo info;
1758
1759 clear_siginfo(&info);
1760 info.si_signo = SIGSEGV;
1761 info.si_errno = 0;
1762 info.si_code = SEGV_PKUERR;
1763 info.si_addr = addr;
1764 info.si_pkey = pkey;
1765 return force_sig_info(&info);
1766 }
1767 #endif
1768
1769
1770
1771
1772 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1773 {
1774 struct kernel_siginfo info;
1775
1776 clear_siginfo(&info);
1777 info.si_signo = SIGTRAP;
1778 info.si_errno = errno;
1779 info.si_code = TRAP_HWBKPT;
1780 info.si_addr = addr;
1781 return force_sig_info(&info);
1782 }
1783
1784 int kill_pgrp(struct pid *pid, int sig, int priv)
1785 {
1786 int ret;
1787
1788 read_lock(&tasklist_lock);
1789 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1790 read_unlock(&tasklist_lock);
1791
1792 return ret;
1793 }
1794 EXPORT_SYMBOL(kill_pgrp);
1795
1796 int kill_pid(struct pid *pid, int sig, int priv)
1797 {
1798 return kill_pid_info(sig, __si_special(priv), pid);
1799 }
1800 EXPORT_SYMBOL(kill_pid);
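/*
 * Usage sketch (illustrative):
 *
 *	kill_pgrp(tty->pgrp, SIGHUP, 1);	// e.g. tty hangup path
 *
 *	rcu_read_lock();
 *	kill_pid(find_vpid(nr), SIGTERM, 1);	// find_vpid() needs RCU
 *	rcu_read_unlock();
 */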
1801
1802 /*
1803  * These functions support sending signals using preallocated sigqueue
1804  * structures.  This is needed "because realtime applications cannot
1805  * afford to lose notifications of asynchronous events, like timer
1806  * expirations or I/O completions".  In the case of POSIX Timers
1807  * we allocate the sigqueue structure from the timer_create.  If this
1808  * allocation fails we are able to report the failure to the application
1809  * with an EAGAIN error.
1810  */
1811 struct sigqueue *sigqueue_alloc(void)
1812 {
1813 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1814
1815 if (q)
1816 q->flags |= SIGQUEUE_PREALLOC;
1817
1818 return q;
1819 }
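/*
 * Lifecycle sketch (illustrative, posix-timers style):
 *
 *	q = sigqueue_alloc();			// timer_create()
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// each timer expiry
 *	sigqueue_free(q);			// timer_delete()
 *
 * Preallocating at create time means expiry cannot fail on memory
 * allocation or on the RLIMIT_SIGPENDING accounting done in
 * __sigqueue_alloc().
 */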
1820
1821 void sigqueue_free(struct sigqueue *q)
1822 {
1823 unsigned long flags;
1824 spinlock_t *lock = &current->sighand->siglock;
1825
1826 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1827
1828
1829
1830
1831
1832 spin_lock_irqsave(lock, flags);
1833 q->flags &= ~SIGQUEUE_PREALLOC;
1834
1835
1836
1837
1838 if (!list_empty(&q->list))
1839 q = NULL;
1840 spin_unlock_irqrestore(lock, flags);
1841
1842 if (q)
1843 __sigqueue_free(q);
1844 }
1845
1846 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1847 {
1848 int sig = q->info.si_signo;
1849 struct sigpending *pending;
1850 struct task_struct *t;
1851 unsigned long flags;
1852 int ret, result;
1853
1854 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1855
1856 ret = -1;
1857 rcu_read_lock();
1858 t = pid_task(pid, type);
1859 if (!t || !likely(lock_task_sighand(t, &flags)))
1860 goto ret;
1861
1862 ret = 1;
1863 result = TRACE_SIGNAL_IGNORED;
1864 if (!prepare_signal(sig, t, false))
1865 goto out;
1866
1867 ret = 0;
1868 if (unlikely(!list_empty(&q->list))) {
1869
1870
1871
1872
1873 BUG_ON(q->info.si_code != SI_TIMER);
1874 q->info.si_overrun++;
1875 result = TRACE_SIGNAL_ALREADY_PENDING;
1876 goto out;
1877 }
1878 q->info.si_overrun = 0;
1879
1880 signalfd_notify(t, sig);
1881 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1882 list_add_tail(&q->list, &pending->list);
1883 sigaddset(&pending->signal, sig);
1884 complete_signal(sig, t, type);
1885 result = TRACE_SIGNAL_DELIVERED;
1886 out:
1887 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1888 unlock_task_sighand(t, &flags);
1889 ret:
1890 rcu_read_unlock();
1891 return ret;
1892 }
1893
1894 static void do_notify_pidfd(struct task_struct *task)
1895 {
1896 struct pid *pid;
1897
1898 WARN_ON(task->exit_state == 0);
1899 pid = task_pid(task);
1900 wake_up_all(&pid->wait_pidfd);
1901 }
1902
1903 /*
1904  * Let a parent know about the death of a child.
1905  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1906  *
1907  * Returns true if our parent ignored us and so we've switched to
1908  * self-reaping.
1909  */
1910 bool do_notify_parent(struct task_struct *tsk, int sig)
1911 {
1912 struct kernel_siginfo info;
1913 unsigned long flags;
1914 struct sighand_struct *psig;
1915 bool autoreap = false;
1916 u64 utime, stime;
1917
1918 BUG_ON(sig == -1);
1919
1920
1921 BUG_ON(task_is_stopped_or_traced(tsk));
1922
1923 BUG_ON(!tsk->ptrace &&
1924 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1925
1926
1927 do_notify_pidfd(tsk);
1928
1929 if (sig != SIGCHLD) {
1930
1931
1932
1933
1934 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1935 sig = SIGCHLD;
1936 }
1937
1938 clear_siginfo(&info);
1939 info.si_signo = sig;
1940 info.si_errno = 0;
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952 rcu_read_lock();
1953 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1954 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1955 task_uid(tsk));
1956 rcu_read_unlock();
1957
1958 task_cputime(tsk, &utime, &stime);
1959 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1960 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1961
1962 info.si_status = tsk->exit_code & 0x7f;
1963 if (tsk->exit_code & 0x80)
1964 info.si_code = CLD_DUMPED;
1965 else if (tsk->exit_code & 0x7f)
1966 info.si_code = CLD_KILLED;
1967 else {
1968 info.si_code = CLD_EXITED;
1969 info.si_status = tsk->exit_code >> 8;
1970 }
1971
1972 psig = tsk->parent->sighand;
1973 spin_lock_irqsave(&psig->siglock, flags);
1974 if (!tsk->ptrace && sig == SIGCHLD &&
1975 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1976 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992 autoreap = true;
1993 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1994 sig = 0;
1995 }
1996
1997
1998
1999
2000 if (valid_signal(sig) && sig)
2001 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2002 __wake_up_parent(tsk, tsk->parent);
2003 spin_unlock_irqrestore(&psig->siglock, flags);
2004
2005 return autoreap;
2006 }
2007
2008 /**
2009  * do_notify_parent_cldstop - notify parent of stopped/continued state change
2010  * @tsk: task reporting the state change
2011  * @for_ptracer: the notification is for ptracer
2012  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2013  *
2014  * Notify @tsk's parent that the stopped/continued state has changed.  If
2015  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2016  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2017  *
2018  * CONTEXT:
2019  * Must be called with tasklist_lock at least read locked.
2020  */
2021 static void do_notify_parent_cldstop(struct task_struct *tsk,
2022 bool for_ptracer, int why)
2023 {
2024 struct kernel_siginfo info;
2025 unsigned long flags;
2026 struct task_struct *parent;
2027 struct sighand_struct *sighand;
2028 u64 utime, stime;
2029
2030 if (for_ptracer) {
2031 parent = tsk->parent;
2032 } else {
2033 tsk = tsk->group_leader;
2034 parent = tsk->real_parent;
2035 }
2036
2037 clear_siginfo(&info);
2038 info.si_signo = SIGCHLD;
2039 info.si_errno = 0;
2040
2041
2042
2043 rcu_read_lock();
2044 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2045 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2046 rcu_read_unlock();
2047
2048 task_cputime(tsk, &utime, &stime);
2049 info.si_utime = nsec_to_clock_t(utime);
2050 info.si_stime = nsec_to_clock_t(stime);
2051
2052 info.si_code = why;
2053 switch (why) {
2054 case CLD_CONTINUED:
2055 info.si_status = SIGCONT;
2056 break;
2057 case CLD_STOPPED:
2058 info.si_status = tsk->signal->group_exit_code & 0x7f;
2059 break;
2060 case CLD_TRAPPED:
2061 info.si_status = tsk->exit_code & 0x7f;
2062 break;
2063 default:
2064 BUG();
2065 }
2066
2067 sighand = parent->sighand;
2068 spin_lock_irqsave(&sighand->siglock, flags);
2069 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2070 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2071 __group_send_sig_info(SIGCHLD, &info, parent);
2072
2073
2074
2075 __wake_up_parent(tsk, parent);
2076 spin_unlock_irqrestore(&sighand->siglock, flags);
2077 }
2078
2079 static inline bool may_ptrace_stop(void)
2080 {
2081 if (!likely(current->ptrace))
2082 return false;
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096 if (unlikely(current->mm->core_state) &&
2097 unlikely(current->mm == current->parent->mm))
2098 return false;
2099
2100 return true;
2101 }
2102
2103
2104
2105
2106
2107 static bool sigkill_pending(struct task_struct *tsk)
2108 {
2109 return sigismember(&tsk->pending.signal, SIGKILL) ||
2110 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2111 }
2112
2113 /*
2114  * This must be called with current->sighand->siglock held.
2115  *
2116  * This should be the path for all ptrace stops.
2117  * We always set current->last_siginfo while stopped here.
2118  * That makes it a way to test a stopped process for
2119  * being ptrace-stopped vs being job-control-stopped.
2120  *
2121  * If we actually decide not to stop at all because the tracer
2122  * is gone, we keep current->exit_code unless clear_code.
2123  */
2124 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2125 __releases(&current->sighand->siglock)
2126 __acquires(&current->sighand->siglock)
2127 {
2128 bool gstop_done = false;
2129
2130 if (arch_ptrace_stop_needed(exit_code, info)) {
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142 spin_unlock_irq(&current->sighand->siglock);
2143 arch_ptrace_stop(exit_code, info);
2144 spin_lock_irq(&current->sighand->siglock);
2145 if (sigkill_pending(current))
2146 return;
2147 }
2148
2149 set_special_state(TASK_TRACED);
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169 smp_wmb();
2170
2171 current->last_siginfo = info;
2172 current->exit_code = exit_code;
2173
2174
2175
2176
2177
2178
2179
2180
2181 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2182 gstop_done = task_participate_group_stop(current);
2183
2184
2185 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2186 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2187 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2188
2189
2190 task_clear_jobctl_trapping(current);
2191
2192 spin_unlock_irq(¤t->sighand->siglock);
2193 read_lock(&tasklist_lock);
2194 if (may_ptrace_stop()) {
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205 do_notify_parent_cldstop(current, true, why);
2206 if (gstop_done && ptrace_reparented(current))
2207 do_notify_parent_cldstop(current, false, why);
2208
2209
2210
2211
2212
2213
2214
2215 preempt_disable();
2216 read_unlock(&tasklist_lock);
2217 cgroup_enter_frozen();
2218 preempt_enable_no_resched();
2219 freezable_schedule();
2220 cgroup_leave_frozen(true);
2221 } else {
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232 if (gstop_done)
2233 do_notify_parent_cldstop(current, false, why);
2234
2235
2236 __set_current_state(TASK_RUNNING);
2237 if (clear_code)
2238 current->exit_code = 0;
2239 read_unlock(&tasklist_lock);
2240 }
2241
2242
2243
2244
2245
2246
2247 spin_lock_irq(&current->sighand->siglock);
2248 current->last_siginfo = NULL;
2249
2250
2251 current->jobctl &= ~JOBCTL_LISTENING;
2252
2253
2254
2255
2256
2257
2258 recalc_sigpending_tsk(current);
2259 }
2260
2261 static void ptrace_do_notify(int signr, int exit_code, int why)
2262 {
2263 kernel_siginfo_t info;
2264
2265 clear_siginfo(&info);
2266 info.si_signo = signr;
2267 info.si_code = exit_code;
2268 info.si_pid = task_pid_vnr(current);
2269 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2270
2271
2272 ptrace_stop(exit_code, why, 1, &info);
2273 }
2274
2275 void ptrace_notify(int exit_code)
2276 {
2277 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2278 if (unlikely(current->task_works))
2279 task_work_run();
2280
2281 spin_lock_irq(&current->sighand->siglock);
2282 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2283 spin_unlock_irq(&current->sighand->siglock);
2284 }
2285
2286 /**
2287  * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2288  * @signr: signr causing group stop if initiating
2289  *
2290  * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2291  * and participate in it.  If already set, participate in the existing
2292  * group stop.  If participated in a group stop (and thus slept), %true is
2293  * returned with siglock released.
2294  *
2295  * If ptraced, this function doesn't handle stop itself.  Instead,
2296  * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2297  * untouched.  The caller must ensure that INTERRUPT trap handling takes
2298  * place afterwards.
2299  *
2300  * CONTEXT:
2301  * Must be called with @current->sighand->siglock held, which is released
2302  * on %true return.
2303  *
2304  * RETURNS:
2305  * %false if group stop is already cancelled or ptrace trap is scheduled.
2306  * %true if participated in group stop.
2307  */
2308 static bool do_signal_stop(int signr)
2309 __releases(&current->sighand->siglock)
2310 {
2311 struct signal_struct *sig = current->signal;
2312
2313 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2314 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2315 struct task_struct *t;
2316
2317
2318 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2319
2320 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2321 unlikely(signal_group_exit(sig)))
2322 return false;
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2343 sig->group_exit_code = signr;
2344
2345 sig->group_stop_count = 0;
2346
2347 if (task_set_jobctl_pending(current, signr | gstop))
2348 sig->group_stop_count++;
2349
2350 t = current;
2351 while_each_thread(current, t) {
2352
2353
2354
2355
2356
2357 if (!task_is_stopped(t) &&
2358 task_set_jobctl_pending(t, signr | gstop)) {
2359 sig->group_stop_count++;
2360 if (likely(!(t->ptrace & PT_SEIZED)))
2361 signal_wake_up(t, 0);
2362 else
2363 ptrace_trap_notify(t);
2364 }
2365 }
2366 }
2367
2368 if (likely(!current->ptrace)) {
2369 int notify = 0;
2370
2371
2372
2373
2374
2375
2376 if (task_participate_group_stop(current))
2377 notify = CLD_STOPPED;
2378
2379 set_special_state(TASK_STOPPED);
2380 spin_unlock_irq(&current->sighand->siglock);
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391 if (notify) {
2392 read_lock(&tasklist_lock);
2393 do_notify_parent_cldstop(current, false, notify);
2394 read_unlock(&tasklist_lock);
2395 }
2396
2397
2398 cgroup_enter_frozen();
2399 freezable_schedule();
2400 return true;
2401 } else {
2402
2403
2404
2405
2406 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2407 return false;
2408 }
2409 }
2410
2411 /**
2412  * do_jobctl_trap - take care of ptrace jobctl traps
2413  *
2414  * When PT_SEIZED, it's used for both group stop and explicit
2415  * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
2416  * accompanying siginfo.  If stopped, the lower eight bits of exit_code
2417  * contain the stop signal; otherwise, %SIGTRAP.
2418  *
2419  * When !PT_SEIZED, it's used only for group stop trap with the stop
2420  * signal number as exit_code and no siginfo.
2421  *
2422  * CONTEXT:
2423  * Must be called with @current->sighand->siglock held, which may be
2424  * released and re-acquired before returning with intervening sleep.
2425  */
2426 static void do_jobctl_trap(void)
2427 {
2428 struct signal_struct *signal = current->signal;
2429 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2430
2431 if (current->ptrace & PT_SEIZED) {
2432 if (!signal->group_stop_count &&
2433 !(signal->flags & SIGNAL_STOP_STOPPED))
2434 signr = SIGTRAP;
2435 WARN_ON_ONCE(!signr);
2436 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2437 CLD_STOPPED);
2438 } else {
2439 WARN_ON_ONCE(!signr);
2440 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2441 current->exit_code = 0;
2442 }
2443 }
2444
2445 /**
2446  * do_freezer_trap - handle the freezer jobctl trap
2447  *
2448  * Puts the task into frozen state, if only the task is not about to quit.
2449  * In this case it drops JOBCTL_TRAP_FREEZE.
2450  *
2451  * CONTEXT:
2452  * Must be called with @current->sighand->siglock held,
2453  * which is always released before returning.
2454  */
2455 static void do_freezer_trap(void)
2456 __releases(&current->sighand->siglock)
2457 {
2458
2459
2460
2461
2462
2463 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2464 JOBCTL_TRAP_FREEZE) {
2465 spin_unlock_irq(&current->sighand->siglock);
2466 return;
2467 }
2468
2469
2470
2471
2472
2473
2474
2475 __set_current_state(TASK_INTERRUPTIBLE);
2476 clear_thread_flag(TIF_SIGPENDING);
2477 spin_unlock_irq(&current->sighand->siglock);
2478 cgroup_enter_frozen();
2479 freezable_schedule();
2480 }
2481
2482 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2483 {
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2494 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2495
2496
2497 signr = current->exit_code;
2498 if (signr == 0)
2499 return signr;
2500
2501 current->exit_code = 0;
2502
2503
2504
2505
2506
2507
2508
2509 if (signr != info->si_signo) {
2510 clear_siginfo(info);
2511 info->si_signo = signr;
2512 info->si_errno = 0;
2513 info->si_code = SI_USER;
2514 rcu_read_lock();
2515 info->si_pid = task_pid_vnr(current->parent);
2516 info->si_uid = from_kuid_munged(current_user_ns(),
2517 task_uid(current->parent));
2518 rcu_read_unlock();
2519 }
2520
2521
2522 if (sigismember(&current->blocked, signr)) {
2523 send_signal(signr, info, current, PIDTYPE_PID);
2524 signr = 0;
2525 }
2526
2527 return signr;
2528 }
2529
2530 bool get_signal(struct ksignal *ksig)
2531 {
2532 struct sighand_struct *sighand = current->sighand;
2533 struct signal_struct *signal = current->signal;
2534 int signr;
2535
2536 if (unlikely(current->task_works))
2537 task_work_run();
2538
2539 if (unlikely(uprobe_deny_signal()))
2540 return false;
2541
2542
2543
2544
2545
2546
2547 try_to_freeze();
2548
2549 relock:
2550 spin_lock_irq(&sighand->siglock);
2551
2552
2553
2554
2555
2556 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2557 int why;
2558
2559 if (signal->flags & SIGNAL_CLD_CONTINUED)
2560 why = CLD_CONTINUED;
2561 else
2562 why = CLD_STOPPED;
2563
2564 signal->flags &= ~SIGNAL_CLD_MASK;
2565
2566 spin_unlock_irq(&sighand->siglock);
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576 read_lock(&tasklist_lock);
2577 do_notify_parent_cldstop(current, false, why);
2578
2579 if (ptrace_reparented(current->group_leader))
2580 do_notify_parent_cldstop(current->group_leader,
2581 true, why);
2582 read_unlock(&tasklist_lock);
2583
2584 goto relock;
2585 }
2586
2587
2588 if (signal_group_exit(signal)) {
2589 ksig->info.si_signo = signr = SIGKILL;
2590 sigdelset(&current->pending.signal, SIGKILL);
2591 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2592 &sighand->action[SIGKILL - 1]);
2593 recalc_sigpending();
2594 goto fatal;
2595 }
2596
2597 for (;;) {
2598 struct k_sigaction *ka;
2599
2600 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2601 do_signal_stop(0))
2602 goto relock;
2603
2604 if (unlikely(current->jobctl &
2605 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2606 if (current->jobctl & JOBCTL_TRAP_MASK) {
2607 do_jobctl_trap();
2608 spin_unlock_irq(&sighand->siglock);
2609 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2610 do_freezer_trap();
2611
2612 goto relock;
2613 }
2614
2615
2616
2617
2618
2619 if (unlikely(cgroup_task_frozen(current))) {
2620 spin_unlock_irq(&sighand->siglock);
2621 cgroup_leave_frozen(false);
2622 goto relock;
2623 }
2624
2625
2626
2627
2628
2629
2630
2631 signr = dequeue_synchronous_signal(&ksig->info);
2632 if (!signr)
2633 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2634
2635 if (!signr)
2636 break;
2637
2638 if (unlikely(current->ptrace) && signr != SIGKILL) {
2639 signr = ptrace_signal(signr, &ksig->info);
2640 if (!signr)
2641 continue;
2642 }
2643
2644 ka = &sighand->action[signr-1];
2645
2646
2647 trace_signal_deliver(signr, &ksig->info, ka);
2648
2649 if (ka->sa.sa_handler == SIG_IGN)
2650 continue;
2651 if (ka->sa.sa_handler != SIG_DFL) {
2652
2653 ksig->ka = *ka;
2654
2655 if (ka->sa.sa_flags & SA_ONESHOT)
2656 ka->sa.sa_handler = SIG_DFL;
2657
2658 break;
2659 }
2660
2661
2662
2663
2664 if (sig_kernel_ignore(signr))
2665 continue;
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2678 !sig_kernel_only(signr))
2679 continue;
2680
2681 if (sig_kernel_stop(signr)) {
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692 if (signr != SIGSTOP) {
2693 spin_unlock_irq(&sighand->siglock);
2694
2695
2696
2697 if (is_current_pgrp_orphaned())
2698 goto relock;
2699
2700 spin_lock_irq(&sighand->siglock);
2701 }
2702
2703 if (likely(do_signal_stop(ksig->info.si_signo))) {
2704
2705 goto relock;
2706 }
2707
2708
2709
2710
2711
2712 continue;
2713 }
2714
2715 fatal:
2716 spin_unlock_irq(&sighand->siglock);
2717 if (unlikely(cgroup_task_frozen(current)))
2718 cgroup_leave_frozen(true);
2719
2720 /*
2721  * Anything else is fatal, maybe with a core dump.
2722  */
2723 current->flags |= PF_SIGNALED;
2724
2725 if (sig_kernel_coredump(signr)) {
2726 if (print_fatal_signals)
2727 print_fatal_signal(ksig->info.si_signo);
2728 proc_coredump_connector(current);
2729 /*
2730  * Dump core.  do_coredump() takes down the other threads in
2731  * the group as part of producing the dump; when it returns we
2732  * fall through to the fatal group exit below.
2733  */
2734
2735
2736
2737 do_coredump(&ksig->info);
2738 }
2739
2740 /*
2741  * Death signals, no core dump.
2742  */
2743 do_group_exit(ksig->info.si_signo);
2744 /* NOTREACHED */
2745 }
2746 spin_unlock_irq(&sighand->siglock);
2747
2748 ksig->sig = signr;
2749 return ksig->sig > 0;
2750 }
2751
2752 /**
2753  * signal_delivered - finish delivery of a signal
2754  * @ksig: kernel signal struct
2755  * @stepping: nonzero if debugger single-step or block-step in use
2756  *
2757  * Called when a signal has successfully been delivered.  It updates
2758  * the blocked mask: @ksig->ka.sa.sa_mask is always blocked, and the
2759  * signal itself is blocked unless %SA_NODEFER is set in
2760  * @ksig->ka.sa.sa_flags.  Tracing is notified.
2761  */
2762 static void signal_delivered(struct ksignal *ksig, int stepping)
2763 {
2764 sigset_t blocked;
2765
2766 /* A signal was successfully delivered, and the
2767    saved sigmask was stored on the signal frame,
2768    and will be restored by sigreturn.  So we can
2769    simply clear the restore sigmask flag. */
2770 clear_restore_sigmask();
2771
2772 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2773 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2774 sigaddset(&blocked, ksig->sig);
2775 set_current_blocked(&blocked);
2776 tracehook_signal_handler(stepping);
2777 }
2778
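/*
 * signal_setup_done - called by arch code after it has tried to set up
 * the signal frame for *ksig: on failure the task is killed with
 * SIGSEGV, on success delivery is completed via signal_delivered().
 */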
2779 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2780 {
2781 if (failed)
2782 force_sigsegv(ksig->sig);
2783 else
2784 signal_delivered(ksig, stepping);
2785 }
2786
2787 /*
2788  * It could be that complete_signal() picked us to notify about the
2789  * group-wide signal. Other threads should be notified now to take
2790  * the shared signals in @which since we will not.
2791  */
2792 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2793 {
2794 sigset_t retarget;
2795 struct task_struct *t;
2796
2797 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2798 if (sigisemptyset(&retarget))
2799 return;
2800
2801 t = tsk;
2802 while_each_thread(tsk, t) {
2803 if (t->flags & PF_EXITING)
2804 continue;
2805
2806 if (!has_pending_signals(&retarget, &t->blocked))
2807 continue;
2808 /* Remove the signals this thread can handle. */
2809 sigandsets(&retarget, &retarget, &t->blocked);
2810
2811 if (!signal_pending(t))
2812 signal_wake_up(t, 0);
2813
2814 if (sigisemptyset(&retarget))
2815 break;
2816 }
2817 }
2818
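/*
 * exit_signals - called on the exit path to mark @tsk PF_EXITING, hand
 * shared pending signals this thread was going to take over to the
 * remaining live threads, and finish its part of a pending group stop.
 */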
2819 void exit_signals(struct task_struct *tsk)
2820 {
2821 int group_stop = 0;
2822 sigset_t unblocked;
2823
2824 /*
2825  * @tsk is about to have PF_EXITING set - lock out users which
2826  * expect stable threadgroup.
2827  */
2828 cgroup_threadgroup_change_begin(tsk);
2829
2830 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2831 tsk->flags |= PF_EXITING;
2832 cgroup_threadgroup_change_end(tsk);
2833 return;
2834 }
2835
2836 spin_lock_irq(&tsk->sighand->siglock);
2837
2838 /* From now this task is not visible for group-wide signals,
2839  * see wants_signal(), do_signal_stop().
2840  */
2841 tsk->flags |= PF_EXITING;
2842
2843 cgroup_threadgroup_change_end(tsk);
2844
2845 if (!signal_pending(tsk))
2846 goto out;
2847
2848 unblocked = tsk->blocked;
2849 signotset(&unblocked);
2850 retarget_shared_pending(tsk, &unblocked);
2851
2852 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2853 task_participate_group_stop(tsk))
2854 group_stop = CLD_STOPPED;
2855 out:
2856 spin_unlock_irq(&tsk->sighand->siglock);
2857
2858 /*
2859  * If group stop has completed, deliver the notification.  This
2860  * should always go to the real parent of the group leader.
2861  */
2862 if (unlikely(group_stop)) {
2863 read_lock(&tasklist_lock);
2864 do_notify_parent_cldstop(tsk, false, group_stop);
2865 read_unlock(&tasklist_lock);
2866 }
2867 }
2868
2869 /*
2870  * System call entry points.
2871  */
2872
2873 /**
2874  *  sys_restart_syscall - restart a system call
2875  */
2876 SYSCALL_DEFINE0(restart_syscall)
2877 {
2878 struct restart_block *restart = &current->restart_block;
2879 return restart->fn(restart);
2880 }
2881
2882 long do_no_restart_syscall(struct restart_block *param)
2883 {
2884 return -EINTR;
2885 }
2886
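/*
 * Set tsk->blocked to *newset.  The caller must hold
 * tsk->sighand->siglock; shared pending signals that are about to
 * become blocked here are first retargeted to other threads.
 */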
2887 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2888 {
2889 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2890 sigset_t newblocked;
2891
2892 sigandnsets(&newblocked, newset, &current->blocked);
2893 retarget_shared_pending(tsk, &newblocked);
2894 }
2895 tsk->blocked = *newset;
2896 recalc_sigpending();
2897 }
2898
2899
2900 /*
2901  * set_current_blocked - change current->blocked mask
2902  * @newset: new mask
2903  * It is wrong to change ->blocked directly; this helper is used so
2904  * the process can't miss a shared signal we are going to block.
2905  */
2906 void set_current_blocked(sigset_t *newset)
2907 {
2908 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2909 __set_current_blocked(newset);
2910 }
2911
2912 void __set_current_blocked(const sigset_t *newset)
2913 {
2914 struct task_struct *tsk = current;
2915
2916 /*
2917  * In case the signal mask hasn't changed, there is nothing we need
2918  * to do. The current->blocked shouldn't be modified by other tasks.
2919  */
2920 if (sigequalsets(&tsk->blocked, newset))
2921 return;
2922
2923 spin_lock_irq(&tsk->sighand->siglock);
2924 __set_task_blocked(tsk, newset);
2925 spin_unlock_irq(&tsk->sighand->siglock);
2926 }
2927
2928
2929 /*
2930  * This is also useful for kernel threads that want to temporarily
2931  * (or permanently) block certain signals.
2932  *
2933  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2934  * interface happily blocks "unblockable" signals like SIGKILL and friends.
2935  */
2936 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2937 {
2938 struct task_struct *tsk = current;
2939 sigset_t newset;
2940
2941 /* Lockless, only current can change ->blocked, never from irq */
2942 if (oldset)
2943 *oldset = tsk->blocked;
2944
2945 switch (how) {
2946 case SIG_BLOCK:
2947 sigorsets(&newset, &tsk->blocked, set);
2948 break;
2949 case SIG_UNBLOCK:
2950 sigandnsets(&newset, &tsk->blocked, set);
2951 break;
2952 case SIG_SETMASK:
2953 newset = *set;
2954 break;
2955 default:
2956 return -EINVAL;
2957 }
2958
2959 __set_current_blocked(&newset);
2960 return 0;
2961 }
2962 EXPORT_SYMBOL(sigprocmask);
2963
2964
2965 /*
2966  * The api helps set app-provided sigmasks.
2967  *
2968  * This is useful for syscalls such as ppoll, pselect, io_pgetevents
2969  * and epoll_pwait, where a new sigmask is passed from userland.
2970  * Note that it does set_restore_sigmask() in advance, so it must
2971  * always be paired with restore_saved_sigmask_unless() on return.
2972  */
2973 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2974 {
2975 sigset_t kmask;
2976
2977 if (!umask)
2978 return 0;
2979 if (sigsetsize != sizeof(sigset_t))
2980 return -EINVAL;
2981 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2982 return -EFAULT;
2983
2984 set_restore_sigmask();
2985 current->saved_sigmask = current->blocked;
2986 set_current_blocked(&kmask);
2987
2988 return 0;
2989 }
2990
2991 #ifdef CONFIG_COMPAT
2992 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2993 size_t sigsetsize)
2994 {
2995 sigset_t kmask;
2996
2997 if (!umask)
2998 return 0;
2999 if (sigsetsize != sizeof(compat_sigset_t))
3000 return -EINVAL;
3001 if (get_compat_sigset(&kmask, umask))
3002 return -EFAULT;
3003
3004 set_restore_sigmask();
3005 current->saved_sigmask = current->blocked;
3006 set_current_blocked(&kmask);
3007
3008 return 0;
3009 }
3010 #endif
3011
3012
3013 /**
3014  *  sys_rt_sigprocmask - change the list of currently blocked signals
3015  *  @how: whether to add, remove, or set signals
3016  *  @nset: new set of blocked signals, if non-null
3017  *  @oset: previous signal mask, returned if non-null; @sigsetsize: size of sigset_t
3018  */
3019 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3020 sigset_t __user *, oset, size_t, sigsetsize)
3021 {
3022 sigset_t old_set, new_set;
3023 int error;
3024
3025 /* XXX: Don't preclude handling different sized sigset_t's. */
3026 if (sigsetsize != sizeof(sigset_t))
3027 return -EINVAL;
3028
3029 old_set = current->blocked;
3030
3031 if (nset) {
3032 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3033 return -EFAULT;
3034 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3035
3036 error = sigprocmask(how, &new_set, NULL);
3037 if (error)
3038 return error;
3039 }
3040
3041 if (oset) {
3042 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3043 return -EFAULT;
3044 }
3045
3046 return 0;
3047 }
3048
3049 #ifdef CONFIG_COMPAT
3050 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3051 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3052 {
3053 sigset_t old_set = current->blocked;
3054
3055 /* XXX: Don't preclude handling different sized sigset_t's. */
3056 if (sigsetsize != sizeof(sigset_t))
3057 return -EINVAL;
3058
3059 if (nset) {
3060 sigset_t new_set;
3061 int error;
3062 if (get_compat_sigset(&new_set, nset))
3063 return -EFAULT;
3064 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3065
3066 error = sigprocmask(how, &new_set, NULL);
3067 if (error)
3068 return error;
3069 }
3070 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3071 }
3072 #endif
3073
3074 static void do_sigpending(sigset_t *set)
3075 {
3076 spin_lock_irq(&current->sighand->siglock);
3077 sigorsets(set, &current->pending.signal,
3078 &current->signal->shared_pending.signal);
3079 spin_unlock_irq(&current->sighand->siglock);
3080
3081 /* Outside the lock because only this thread touches it. */
3082 sigandsets(set, &current->blocked, set);
3083 }
3084
3085
3086 /**
3087  *  sys_rt_sigpending - examine pending signals raised while blocked
3088  *  @uset: stores pending signals
3089  *  @sigsetsize: size of sigset_t type or larger
3090  */
3091 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3092 {
3093 sigset_t set;
3094
3095 if (sigsetsize > sizeof(*uset))
3096 return -EINVAL;
3097
3098 do_sigpending(&set);
3099
3100 if (copy_to_user(uset, &set, sigsetsize))
3101 return -EFAULT;
3102
3103 return 0;
3104 }
3105
3106 #ifdef CONFIG_COMPAT
3107 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3108 compat_size_t, sigsetsize)
3109 {
3110 sigset_t set;
3111
3112 if (sigsetsize > sizeof(*uset))
3113 return -EINVAL;
3114
3115 do_sigpending(&set);
3116
3117 return put_compat_sigset(uset, &set, sigsetsize);
3118 }
3119 #endif
3120
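/*
 * For signals with signal-specific si_codes: the highest known si_code
 * (limit) and the siginfo union layout those codes use.
 */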
3121 static const struct {
3122 unsigned char limit, layout;
3123 } sig_sicodes[] = {
3124 [SIGILL] = { NSIGILL, SIL_FAULT },
3125 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3126 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3127 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3128 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3129 #if defined(SIGEMT)
3130 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3131 #endif
3132 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3133 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3134 [SIGSYS] = { NSIGSYS, SIL_SYS },
3135 };
3136
3137 static bool known_siginfo_layout(unsigned sig, int si_code)
3138 {
3139 if (si_code == SI_KERNEL)
3140 return true;
3141 else if ((si_code > SI_USER)) {
3142 if (sig_specific_sicodes(sig)) {
3143 if (si_code <= sig_sicodes[sig].limit)
3144 return true;
3145 }
3146 else if (si_code <= NSIGPOLL)
3147 return true;
3148 }
3149 else if (si_code >= SI_DETHREAD)
3150 return true;
3151 else if (si_code == SI_ASYNCNL)
3152 return true;
3153 return false;
3154 }
3155
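/*
 * Classify a (signal, si_code) pair into the siginfo union layout it
 * uses, so the copy routines know which fields are live.
 */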
3156 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3157 {
3158 enum siginfo_layout layout = SIL_KILL;
3159 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3160 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3161 (si_code <= sig_sicodes[sig].limit)) {
3162 layout = sig_sicodes[sig].layout;
3163 /* Handle the exceptions */
3164 if ((sig == SIGBUS) &&
3165 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3166 layout = SIL_FAULT_MCEERR;
3167 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3168 layout = SIL_FAULT_BNDERR;
3169 #ifdef SEGV_PKUERR
3170 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3171 layout = SIL_FAULT_PKUERR;
3172 #endif
3173 }
3174 else if (si_code <= NSIGPOLL)
3175 layout = SIL_POLL;
3176 } else {
3177 if (si_code == SI_TIMER)
3178 layout = SIL_TIMER;
3179 else if (si_code == SI_SIGIO)
3180 layout = SIL_POLL;
3181 else if (si_code < 0)
3182 layout = SIL_RT;
3183 }
3184 return layout;
3185 }
3186
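/*
 * The userspace siginfo_t is larger than the kernel's kernel_siginfo;
 * this returns a pointer to the "expansion" bytes beyond the part the
 * kernel tracks.
 */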
3187 static inline char __user *si_expansion(const siginfo_t __user *info)
3188 {
3189 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3190 }
3191
3192 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3193 {
3194 char __user *expansion = si_expansion(to);
3195 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3196 return -EFAULT;
3197 if (clear_user(expansion, SI_EXPANSION_SIZE))
3198 return -EFAULT;
3199 return 0;
3200 }
3201
3202 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3203 const siginfo_t __user *from)
3204 {
3205 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3206 char __user *expansion = si_expansion(from);
3207 char buf[SI_EXPANSION_SIZE];
3208 int i;
3209
3210 /*
3211  * An unknown si_code might need more than
3212  * sizeof(struct kernel_siginfo) bytes.  Verify all of the extra
3213  * bytes are 0.  This guarantees copy_siginfo_to_user will
3214  * return this data to userspace exactly. */
3215 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3216 return -EFAULT;
3217 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3218 if (buf[i] != 0)
3219 return -E2BIG;
3220 }
3221 }
3222 return 0;
3223 }
3224
3225 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3226 const siginfo_t __user *from)
3227 {
3228 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3229 return -EFAULT;
3230 to->si_signo = signo;
3231 return post_copy_siginfo_from_user(to, from);
3232 }
3233
3234 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3235 {
3236 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3237 return -EFAULT;
3238 return post_copy_siginfo_from_user(to, from);
3239 }
3240
3241 #ifdef CONFIG_COMPAT
3242 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3243 const struct kernel_siginfo *from)
3244 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3245 {
3246 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3247 }
3248 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3249 const struct kernel_siginfo *from, bool x32_ABI)
3250 #endif
3251 {
3252 struct compat_siginfo new;
3253 memset(&new, 0, sizeof(new));
3254
3255 new.si_signo = from->si_signo;
3256 new.si_errno = from->si_errno;
3257 new.si_code = from->si_code;
3258 switch(siginfo_layout(from->si_signo, from->si_code)) {
3259 case SIL_KILL:
3260 new.si_pid = from->si_pid;
3261 new.si_uid = from->si_uid;
3262 break;
3263 case SIL_TIMER:
3264 new.si_tid = from->si_tid;
3265 new.si_overrun = from->si_overrun;
3266 new.si_int = from->si_int;
3267 break;
3268 case SIL_POLL:
3269 new.si_band = from->si_band;
3270 new.si_fd = from->si_fd;
3271 break;
3272 case SIL_FAULT:
3273 new.si_addr = ptr_to_compat(from->si_addr);
3274 #ifdef __ARCH_SI_TRAPNO
3275 new.si_trapno = from->si_trapno;
3276 #endif
3277 break;
3278 case SIL_FAULT_MCEERR:
3279 new.si_addr = ptr_to_compat(from->si_addr);
3280 #ifdef __ARCH_SI_TRAPNO
3281 new.si_trapno = from->si_trapno;
3282 #endif
3283 new.si_addr_lsb = from->si_addr_lsb;
3284 break;
3285 case SIL_FAULT_BNDERR:
3286 new.si_addr = ptr_to_compat(from->si_addr);
3287 #ifdef __ARCH_SI_TRAPNO
3288 new.si_trapno = from->si_trapno;
3289 #endif
3290 new.si_lower = ptr_to_compat(from->si_lower);
3291 new.si_upper = ptr_to_compat(from->si_upper);
3292 break;
3293 case SIL_FAULT_PKUERR:
3294 new.si_addr = ptr_to_compat(from->si_addr);
3295 #ifdef __ARCH_SI_TRAPNO
3296 new.si_trapno = from->si_trapno;
3297 #endif
3298 new.si_pkey = from->si_pkey;
3299 break;
3300 case SIL_CHLD:
3301 new.si_pid = from->si_pid;
3302 new.si_uid = from->si_uid;
3303 new.si_status = from->si_status;
3304 #ifdef CONFIG_X86_X32_ABI
3305 if (x32_ABI) {
3306 new._sifields._sigchld_x32._utime = from->si_utime;
3307 new._sifields._sigchld_x32._stime = from->si_stime;
3308 } else
3309 #endif
3310 {
3311 new.si_utime = from->si_utime;
3312 new.si_stime = from->si_stime;
3313 }
3314 break;
3315 case SIL_RT:
3316 new.si_pid = from->si_pid;
3317 new.si_uid = from->si_uid;
3318 new.si_int = from->si_int;
3319 break;
3320 case SIL_SYS:
3321 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3322 new.si_syscall = from->si_syscall;
3323 new.si_arch = from->si_arch;
3324 break;
3325 }
3326
3327 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3328 return -EFAULT;
3329
3330 return 0;
3331 }
3332
3333 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3334 const struct compat_siginfo *from)
3335 {
3336 clear_siginfo(to);
3337 to->si_signo = from->si_signo;
3338 to->si_errno = from->si_errno;
3339 to->si_code = from->si_code;
3340 switch(siginfo_layout(from->si_signo, from->si_code)) {
3341 case SIL_KILL:
3342 to->si_pid = from->si_pid;
3343 to->si_uid = from->si_uid;
3344 break;
3345 case SIL_TIMER:
3346 to->si_tid = from->si_tid;
3347 to->si_overrun = from->si_overrun;
3348 to->si_int = from->si_int;
3349 break;
3350 case SIL_POLL:
3351 to->si_band = from->si_band;
3352 to->si_fd = from->si_fd;
3353 break;
3354 case SIL_FAULT:
3355 to->si_addr = compat_ptr(from->si_addr);
3356 #ifdef __ARCH_SI_TRAPNO
3357 to->si_trapno = from->si_trapno;
3358 #endif
3359 break;
3360 case SIL_FAULT_MCEERR:
3361 to->si_addr = compat_ptr(from->si_addr);
3362 #ifdef __ARCH_SI_TRAPNO
3363 to->si_trapno = from->si_trapno;
3364 #endif
3365 to->si_addr_lsb = from->si_addr_lsb;
3366 break;
3367 case SIL_FAULT_BNDERR:
3368 to->si_addr = compat_ptr(from->si_addr);
3369 #ifdef __ARCH_SI_TRAPNO
3370 to->si_trapno = from->si_trapno;
3371 #endif
3372 to->si_lower = compat_ptr(from->si_lower);
3373 to->si_upper = compat_ptr(from->si_upper);
3374 break;
3375 case SIL_FAULT_PKUERR:
3376 to->si_addr = compat_ptr(from->si_addr);
3377 #ifdef __ARCH_SI_TRAPNO
3378 to->si_trapno = from->si_trapno;
3379 #endif
3380 to->si_pkey = from->si_pkey;
3381 break;
3382 case SIL_CHLD:
3383 to->si_pid = from->si_pid;
3384 to->si_uid = from->si_uid;
3385 to->si_status = from->si_status;
3386 #ifdef CONFIG_X86_X32_ABI
3387 if (in_x32_syscall()) {
3388 to->si_utime = from->_sifields._sigchld_x32._utime;
3389 to->si_stime = from->_sifields._sigchld_x32._stime;
3390 } else
3391 #endif
3392 {
3393 to->si_utime = from->si_utime;
3394 to->si_stime = from->si_stime;
3395 }
3396 break;
3397 case SIL_RT:
3398 to->si_pid = from->si_pid;
3399 to->si_uid = from->si_uid;
3400 to->si_int = from->si_int;
3401 break;
3402 case SIL_SYS:
3403 to->si_call_addr = compat_ptr(from->si_call_addr);
3404 to->si_syscall = from->si_syscall;
3405 to->si_arch = from->si_arch;
3406 break;
3407 }
3408 return 0;
3409 }
3410
3411 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3412 const struct compat_siginfo __user *ufrom)
3413 {
3414 struct compat_siginfo from;
3415
3416 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3417 return -EFAULT;
3418
3419 from.si_signo = signo;
3420 return post_copy_siginfo_from_user32(to, &from);
3421 }
3422
3423 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3424 const struct compat_siginfo __user *ufrom)
3425 {
3426 struct compat_siginfo from;
3427
3428 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3429 return -EFAULT;
3430
3431 return post_copy_siginfo_from_user32(to, &from);
3432 }
3433 #endif
3434
3435 /**
3436  *  do_sigtimedwait - wait for queued signals specified in @which
3437  *  @which: queued signals to wait for
3438  *  @info: if non-null, the signal's siginfo is returned here
3439  *  @ts: upper bound on process time suspension
3440  */
3441 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3442 const struct timespec64 *ts)
3443 {
3444 ktime_t *to = NULL, timeout = KTIME_MAX;
3445 struct task_struct *tsk = current;
3446 sigset_t mask = *which;
3447 int sig, ret = 0;
3448
3449 if (ts) {
3450 if (!timespec64_valid(ts))
3451 return -EINVAL;
3452 timeout = timespec64_to_ktime(*ts);
3453 to = &timeout;
3454 }
3455
3456 /*
3457  * Invert the set of allowed signals to get those we want to block.
3458  */
3459 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3460 signotset(&mask);
3461
3462 spin_lock_irq(&tsk->sighand->siglock);
3463 sig = dequeue_signal(tsk, &mask, info);
3464 if (!sig && timeout) {
3465 /*
3466  * None ready -- temporarily unblock the signals we're interested
3467  * in while we sleep, so that we'll be awakened when they arrive.
3468  * Unblocking is always fine here, so we can avoid
3469  * set_current_blocked().
3470  */
3471 tsk->real_blocked = tsk->blocked;
3472 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3473 recalc_sigpending();
3474 spin_unlock_irq(&tsk->sighand->siglock);
3475
3476 __set_current_state(TASK_INTERRUPTIBLE);
3477 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3478 HRTIMER_MODE_REL);
3479 spin_lock_irq(&tsk->sighand->siglock);
3480 __set_task_blocked(tsk, &tsk->real_blocked);
3481 sigemptyset(&tsk->real_blocked);
3482 sig = dequeue_signal(tsk, &mask, info);
3483 }
3484 spin_unlock_irq(&tsk->sighand->siglock);
3485
3486 if (sig)
3487 return sig;
3488 return ret ? -EINTR : -EAGAIN;
3489 }
3490
3491
3492 /**
3493  *  sys_rt_sigtimedwait - synchronously wait for queued signals in @uthese
3494  *  @uthese: queued signals to wait for
3495  *  @uinfo: if non-null, the signal's siginfo is returned here
3496  *  @uts: upper bound on process time suspension
3497  *  @sigsetsize: size of sigset_t type
3498  */
3499 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3500 siginfo_t __user *, uinfo,
3501 const struct __kernel_timespec __user *, uts,
3502 size_t, sigsetsize)
3503 {
3504 sigset_t these;
3505 struct timespec64 ts;
3506 kernel_siginfo_t info;
3507 int ret;
3508
3509 /* XXX: Don't preclude handling different sized sigset_t's. */
3510 if (sigsetsize != sizeof(sigset_t))
3511 return -EINVAL;
3512
3513 if (copy_from_user(&these, uthese, sizeof(these)))
3514 return -EFAULT;
3515
3516 if (uts) {
3517 if (get_timespec64(&ts, uts))
3518 return -EFAULT;
3519 }
3520
3521 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3522
3523 if (ret > 0 && uinfo) {
3524 if (copy_siginfo_to_user(uinfo, &info))
3525 ret = -EFAULT;
3526 }
3527
3528 return ret;
3529 }
3530
3531 #ifdef CONFIG_COMPAT_32BIT_TIME
3532 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3533 siginfo_t __user *, uinfo,
3534 const struct old_timespec32 __user *, uts,
3535 size_t, sigsetsize)
3536 {
3537 sigset_t these;
3538 struct timespec64 ts;
3539 kernel_siginfo_t info;
3540 int ret;
3541
3542 if (sigsetsize != sizeof(sigset_t))
3543 return -EINVAL;
3544
3545 if (copy_from_user(&these, uthese, sizeof(these)))
3546 return -EFAULT;
3547
3548 if (uts) {
3549 if (get_old_timespec32(&ts, uts))
3550 return -EFAULT;
3551 }
3552
3553 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3554
3555 if (ret > 0 && uinfo) {
3556 if (copy_siginfo_to_user(uinfo, &info))
3557 ret = -EFAULT;
3558 }
3559
3560 return ret;
3561 }
3562 #endif
3563
3564 #ifdef CONFIG_COMPAT
3565 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3566 struct compat_siginfo __user *, uinfo,
3567 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3568 {
3569 sigset_t s;
3570 struct timespec64 t;
3571 kernel_siginfo_t info;
3572 long ret;
3573
3574 if (sigsetsize != sizeof(sigset_t))
3575 return -EINVAL;
3576
3577 if (get_compat_sigset(&s, uthese))
3578 return -EFAULT;
3579
3580 if (uts) {
3581 if (get_timespec64(&t, uts))
3582 return -EFAULT;
3583 }
3584
3585 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3586
3587 if (ret > 0 && uinfo) {
3588 if (copy_siginfo_to_user32(uinfo, &info))
3589 ret = -EFAULT;
3590 }
3591
3592 return ret;
3593 }
3594
3595 #ifdef CONFIG_COMPAT_32BIT_TIME
3596 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3597 struct compat_siginfo __user *, uinfo,
3598 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3599 {
3600 sigset_t s;
3601 struct timespec64 t;
3602 kernel_siginfo_t info;
3603 long ret;
3604
3605 if (sigsetsize != sizeof(sigset_t))
3606 return -EINVAL;
3607
3608 if (get_compat_sigset(&s, uthese))
3609 return -EFAULT;
3610
3611 if (uts) {
3612 if (get_old_timespec32(&t, uts))
3613 return -EFAULT;
3614 }
3615
3616 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3617
3618 if (ret > 0 && uinfo) {
3619 if (copy_siginfo_to_user32(uinfo, &info))
3620 ret = -EFAULT;
3621 }
3622
3623 return ret;
3624 }
3625 #endif
3626 #endif
3627
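/*
 * Fill in a SI_USER siginfo the way kill(2) does: the sender's tgid
 * and uid as seen in the current namespaces.
 */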
3628 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3629 {
3630 clear_siginfo(info);
3631 info->si_signo = sig;
3632 info->si_errno = 0;
3633 info->si_code = SI_USER;
3634 info->si_pid = task_tgid_vnr(current);
3635 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3636 }
3637
3638
3639 /**
3640  *  sys_kill - send a signal to a process
3641  *  @pid: the PID of the process; @sig: signal to be sent
3642  */
3643 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3644 {
3645 struct kernel_siginfo info;
3646
3647 prepare_kill_siginfo(sig, &info);
3648
3649 return kill_something_info(sig, &info, pid);
3650 }
3651
3652
3653 /*
3654  * Verify that signaler and signalee are either in the same pid namespace
3655  * or that the signaler's pid namespace is an ancestor of the signalee's.
3656  */
3657 static bool access_pidfd_pidns(struct pid *pid)
3658 {
3659 struct pid_namespace *active = task_active_pid_ns(current);
3660 struct pid_namespace *p = ns_of_pid(pid);
3661
3662 for (;;) {
3663 if (!p)
3664 return false;
3665 if (p == active)
3666 break;
3667 p = p->parent;
3668 }
3669
3670 return true;
3671 }
3672
3673 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3674 {
3675 #ifdef CONFIG_COMPAT
3676 /*
3677  * Avoid hooking up compat syscalls and instead handle necessary
3678  * conversions here. Note, this is a stop-gap measure and should not be
3679  * considered a generic solution.
3680  */
3681 if (in_compat_syscall())
3682 return copy_siginfo_from_user32(
3683 kinfo, (struct compat_siginfo __user *)info);
3684 #endif
3685 return copy_siginfo_from_user(kinfo, info);
3686 }
3687
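/*
 * Return the struct pid behind an fd passed to pidfd_send_signal():
 * either a pidfd proper, or the fallback handled by
 * tgid_pidfd_to_pid().
 */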
3688 static struct pid *pidfd_to_pid(const struct file *file)
3689 {
3690 struct pid *pid;
3691
3692 pid = pidfd_pid(file);
3693 if (!IS_ERR(pid))
3694 return pid;
3695
3696 return tgid_pidfd_to_pid(file);
3697 }
3698
3699
3700 /**
3701  * sys_pidfd_send_signal - Signal a process through a pidfd
3702  * @pidfd:  file descriptor of the process
3703  * @sig:    signal to send
3704  * @info:   signal info
3705  * @flags:  future flags
3706  *
3707  * The syscall currently only signals via PIDTYPE_PID which covers
3708  * kill(<positive-pid>, <signal>). It does not signal threads or
3709  * process groups.
3710  * In order to extend the syscall to threads and process groups the
3711  * @flags argument should be used; @flags will then determine what is
3712  * signaled, not the file descriptor itself. In other words, grouping
3713  * is a property of the flags argument, not of the file descriptor.
3714  *
3715  * Return: 0 on success, negative errno on failure
3716  */
3717 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3718 siginfo_t __user *, info, unsigned int, flags)
3719 {
3720 int ret;
3721 struct fd f;
3722 struct pid *pid;
3723 kernel_siginfo_t kinfo;
3724
3725 /* Enforce flags being set to 0 until we add an extension. */
3726 if (flags)
3727 return -EINVAL;
3728
3729 f = fdget(pidfd);
3730 if (!f.file)
3731 return -EBADF;
3732
3733 /* Is this a pidfd? */
3734 pid = pidfd_to_pid(f.file);
3735 if (IS_ERR(pid)) {
3736 ret = PTR_ERR(pid);
3737 goto err;
3738 }
3739
3740 ret = -EINVAL;
3741 if (!access_pidfd_pidns(pid))
3742 goto err;
3743
3744 if (info) {
3745 ret = copy_siginfo_from_user_any(&kinfo, info);
3746 if (unlikely(ret))
3747 goto err;
3748
3749 ret = -EINVAL;
3750 if (unlikely(sig != kinfo.si_signo))
3751 goto err;
3752
3753 /* Only allow sending arbitrary signals to yourself. */
3754 ret = -EPERM;
3755 if ((task_pid(current) != pid) &&
3756 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3757 goto err;
3758 } else {
3759 prepare_kill_siginfo(sig, &kinfo);
3760 }
3761
3762 ret = kill_pid_info(sig, &kinfo, pid);
3763
3764 err:
3765 fdput(f);
3766 return ret;
3767 }
3768
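/*
 * Send @sig to the thread with virtual pid @pid; if @tgid is positive,
 * also require that the thread belong to that thread group.  A null
 * signal performs only the permission and existence checks.
 */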
3769 static int
3770 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3771 {
3772 struct task_struct *p;
3773 int error = -ESRCH;
3774
3775 rcu_read_lock();
3776 p = find_task_by_vpid(pid);
3777 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3778 error = check_kill_permission(sig, info, p);
3779 /*
3780  * The null signal is a permissions and process existence
3781  * probe.  No signal is actually delivered.
3782  */
3783 if (!error && sig) {
3784 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3785 /*
3786  * If lock_task_sighand() failed we pretend the task
3787  * dies after receiving the signal. The window is tiny,
3788  * and the signal is private anyway.
3789  */
3790 if (unlikely(error == -ESRCH))
3791 error = 0;
3792 }
3793 }
3794 rcu_read_unlock();
3795
3796 return error;
3797 }
3798
3799 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3800 {
3801 struct kernel_siginfo info;
3802
3803 clear_siginfo(&info);
3804 info.si_signo = sig;
3805 info.si_errno = 0;
3806 info.si_code = SI_TKILL;
3807 info.si_pid = task_tgid_vnr(current);
3808 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3809
3810 return do_send_specific(tgid, pid, sig, &info);
3811 }
3812
3813
3814 /**
3815  *  sys_tgkill - send signal to one specific thread
3816  *  @tgid: the thread group ID of the thread
3817  *  @pid: the PID of the thread
3818  *  @sig: signal to be sent
3819  *
3820  *  This syscall also checks @tgid and returns -ESRCH even if the PID
3821  *  exists but no longer belongs to the target process; this handles
3822  *  threads exiting and their PIDs being reused. */
3823 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3824 {
3825 /* This is only valid for single tasks */
3826 if (pid <= 0 || tgid <= 0)
3827 return -EINVAL;
3828
3829 return do_tkill(tgid, pid, sig);
3830 }
3831
3832
3833 /**
3834  *  sys_tkill - send signal to one specific task
3835  *  @pid: the PID of the task
3836  *  @sig: signal to be sent
3837  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
3838  */
3839 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3840 {
3841 /* This is only valid for single tasks */
3842 if (pid <= 0)
3843 return -EINVAL;
3844
3845 return do_tkill(0, pid, sig);
3846 }
3847
3848 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3849 {
3850 /* Not even root can pretend to send signals from the kernel.
3851  * Nor can they impersonate a kill()/tgkill(), which adds source info.
3852  */
3853 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3854 (task_pid_vnr(current) != pid))
3855 return -EPERM;
3856
3857 /* POSIX.1b doesn't mention process groups. */
3858 return kill_proc_info(sig, info, pid);
3859 }
3860
3861
3862 /**
3863  *  sys_rt_sigqueueinfo - send signal information to a signal
3864  *  @pid: the PID of the thread; @sig: signal to be sent
3865  *  @uinfo: signal info to be sent
3866  */
3867 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3868 siginfo_t __user *, uinfo)
3869 {
3870 kernel_siginfo_t info;
3871 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3872 if (unlikely(ret))
3873 return ret;
3874 return do_rt_sigqueueinfo(pid, sig, &info);
3875 }
3876
3877 #ifdef CONFIG_COMPAT
3878 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3879 compat_pid_t, pid,
3880 int, sig,
3881 struct compat_siginfo __user *, uinfo)
3882 {
3883 kernel_siginfo_t info;
3884 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3885 if (unlikely(ret))
3886 return ret;
3887 return do_rt_sigqueueinfo(pid, sig, &info);
3888 }
3889 #endif
3890
3891 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3892 {
3893 /* This is only valid for single tasks */
3894 if (pid <= 0 || tgid <= 0)
3895 return -EINVAL;
3896
3897 /* Not even root can pretend to send signals from the kernel.
3898  * Nor can they impersonate a kill()/tgkill(), which adds source info.
3899  */
3900 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3901 (task_pid_vnr(current) != pid))
3902 return -EPERM;
3903
3904 return do_send_specific(tgid, pid, sig, info);
3905 }
3906
3907 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3908 siginfo_t __user *, uinfo)
3909 {
3910 kernel_siginfo_t info;
3911 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3912 if (unlikely(ret))
3913 return ret;
3914 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3915 }
3916
3917 #ifdef CONFIG_COMPAT
3918 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3919 compat_pid_t, tgid,
3920 compat_pid_t, pid,
3921 int, sig,
3922 struct compat_siginfo __user *, uinfo)
3923 {
3924 kernel_siginfo_t info;
3925 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3926 if (unlikely(ret))
3927 return ret;
3928 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3929 }
3930 #endif
3931
3932 /*
3933  * For kthreads only; must not be used if cloned with CLONE_SIGHAND.
3934  */
3935 void kernel_sigaction(int sig, __sighandler_t action)
3936 {
3937 spin_lock_irq(&current->sighand->siglock);
3938 current->sighand->action[sig - 1].sa.sa_handler = action;
3939 if (action == SIG_IGN) {
3940 sigset_t mask;
3941
3942 sigemptyset(&mask);
3943 sigaddset(&mask, sig);
3944
3945 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3946 flush_sigqueue_mask(&mask, &current->pending);
3947 recalc_sigpending();
3948 }
3949 spin_unlock_irq(&current->sighand->siglock);
3950 }
3951 EXPORT_SYMBOL(kernel_sigaction);
3952
3953 void __weak sigaction_compat_abi(struct k_sigaction *act,
3954 struct k_sigaction *oact)
3955 {
3956 }
3957
3958 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3959 {
3960 struct task_struct *p = current, *t;
3961 struct k_sigaction *k;
3962 sigset_t mask;
3963
3964 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3965 return -EINVAL;
3966
3967 k = &p->sighand->action[sig-1];
3968
3969 spin_lock_irq(&p->sighand->siglock);
3970 if (oact)
3971 *oact = *k;
3972
3973 sigaction_compat_abi(act, oact);
3974
3975 if (act) {
3976 sigdelsetmask(&act->sa.sa_mask,
3977 sigmask(SIGKILL) | sigmask(SIGSTOP));
3978 *k = *act;
3979
3980 /*
3981  * POSIX 3.3.1.3:
3982  *  "Setting a signal action to SIG_IGN for a signal that is
3983  *   pending shall cause the pending signal to be discarded,
3984  *   whether or not it is blocked."
3985  *
3986  *  "Setting a signal action to SIG_DFL for a signal that is
3987  *   pending and whose default action is to ignore the signal
3988  *   (for example, SIGCHLD), shall cause the pending signal
3989  *   to be discarded, whether or not it is blocked." */
3990 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3991 sigemptyset(&mask);
3992 sigaddset(&mask, sig);
3993 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3994 for_each_thread(p, t)
3995 flush_sigqueue_mask(&mask, &t->pending);
3996 }
3997 }
3998
3999 spin_unlock_irq(&p->sighand->siglock);
4000 return 0;
4001 }
4002
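/*
 * Common sigaltstack() code: report the old alternate signal stack via
 * *oss and/or install the new one from *ss, after validating the flags
 * and the minimum size.  Changing the stack while running on it is
 * rejected with -EPERM.
 */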
4003 static int
4004 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4005 size_t min_ss_size)
4006 {
4007 struct task_struct *t = current;
4008
4009 if (oss) {
4010 memset(oss, 0, sizeof(stack_t));
4011 oss->ss_sp = (void __user *) t->sas_ss_sp;
4012 oss->ss_size = t->sas_ss_size;
4013 oss->ss_flags = sas_ss_flags(sp) |
4014 (current->sas_ss_flags & SS_FLAG_BITS);
4015 }
4016
4017 if (ss) {
4018 void __user *ss_sp = ss->ss_sp;
4019 size_t ss_size = ss->ss_size;
4020 unsigned ss_flags = ss->ss_flags;
4021 int ss_mode;
4022
4023 if (unlikely(on_sig_stack(sp)))
4024 return -EPERM;
4025
4026 ss_mode = ss_flags & ~SS_FLAG_BITS;
4027 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4028 ss_mode != 0))
4029 return -EINVAL;
4030
4031 if (ss_mode == SS_DISABLE) {
4032 ss_size = 0;
4033 ss_sp = NULL;
4034 } else {
4035 if (unlikely(ss_size < min_ss_size))
4036 return -ENOMEM;
4037 }
4038
4039 t->sas_ss_sp = (unsigned long) ss_sp;
4040 t->sas_ss_size = ss_size;
4041 t->sas_ss_flags = ss_flags;
4042 }
4043 return 0;
4044 }
4045
4046 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4047 {
4048 stack_t new, old;
4049 int err;
4050 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4051 return -EFAULT;
4052 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4053 current_user_stack_pointer(),
4054 MINSIGSTKSZ);
4055 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4056 err = -EFAULT;
4057 return err;
4058 }
4059
4060 int restore_altstack(const stack_t __user *uss)
4061 {
4062 stack_t new;
4063 if (copy_from_user(&new, uss, sizeof(stack_t)))
4064 return -EFAULT;
4065 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4066 MINSIGSTKSZ);
4067 /* squash all but EFAULT for now */
4068 return 0;
4069 }
4070
4071 int __save_altstack(stack_t __user *uss, unsigned long sp)
4072 {
4073 struct task_struct *t = current;
4074 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4075 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4076 __put_user(t->sas_ss_size, &uss->ss_size);
4077 if (err)
4078 return err;
4079 if (t->sas_ss_flags & SS_AUTODISARM)
4080 sas_ss_reset(t);
4081 return 0;
4082 }
4083
4084 #ifdef CONFIG_COMPAT
4085 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4086 compat_stack_t __user *uoss_ptr)
4087 {
4088 stack_t uss, uoss;
4089 int ret;
4090
4091 if (uss_ptr) {
4092 compat_stack_t uss32;
4093 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4094 return -EFAULT;
4095 uss.ss_sp = compat_ptr(uss32.ss_sp);
4096 uss.ss_flags = uss32.ss_flags;
4097 uss.ss_size = uss32.ss_size;
4098 }
4099 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4100 compat_user_stack_pointer(),
4101 COMPAT_MINSIGSTKSZ);
4102 if (ret >= 0 && uoss_ptr) {
4103 compat_stack_t old;
4104 memset(&old, 0, sizeof(old));
4105 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4106 old.ss_flags = uoss.ss_flags;
4107 old.ss_size = uoss.ss_size;
4108 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4109 ret = -EFAULT;
4110 }
4111 return ret;
4112 }
4113
4114 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4115 const compat_stack_t __user *, uss_ptr,
4116 compat_stack_t __user *, uoss_ptr)
4117 {
4118 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4119 }
4120
4121 int compat_restore_altstack(const compat_stack_t __user *uss)
4122 {
4123 int err = do_compat_sigaltstack(uss, NULL);
4124
4125 return err == -EFAULT ? err : 0;
4126 }
4127
4128 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4129 {
4130 int err;
4131 struct task_struct *t = current;
4132 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4133 &uss->ss_sp) |
4134 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4135 __put_user(t->sas_ss_size, &uss->ss_size);
4136 if (err)
4137 return err;
4138 if (t->sas_ss_flags & SS_AUTODISARM)
4139 sas_ss_reset(t);
4140 return 0;
4141 }
4142 #endif
4143
4144 #ifdef __ARCH_WANT_SYS_SIGPENDING
4145
4146 /**
4147  *  sys_sigpending - examine pending signals
4148  *  @uset: where the mask of pending signals is returned
4149  */
4150 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4151 {
4152 sigset_t set;
4153
4154 if (sizeof(old_sigset_t) > sizeof(*uset))
4155 return -EINVAL;
4156
4157 do_sigpending(&set);
4158
4159 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4160 return -EFAULT;
4161
4162 return 0;
4163 }
4164
4165 #ifdef CONFIG_COMPAT
4166 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4167 {
4168 sigset_t set;
4169
4170 do_sigpending(&set);
4171
4172 return put_user(set.sig[0], set32);
4173 }
4174 #endif
4175
4176 #endif
4177
4178 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4179
4180 /**
4181  *  sys_sigprocmask - examine and change blocked signals
4182  *  @how: whether to add, remove, or set signals
4183  *  @nset: signals to add or remove (if non-null)
4184  *  @oset: previous value of signal mask if non-null
4185  *
4186  * Some platforms have their own version with special arguments;
4187  * others support only sys_rt_sigprocmask.
4188  */
4189 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4190 old_sigset_t __user *, oset)
4191 {
4192 old_sigset_t old_set, new_set;
4193 sigset_t new_blocked;
4194
4195 old_set = current->blocked.sig[0];
4196
4197 if (nset) {
4198 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4199 return -EFAULT;
4200
4201 new_blocked = current->blocked;
4202
4203 switch (how) {
4204 case SIG_BLOCK:
4205 sigaddsetmask(&new_blocked, new_set);
4206 break;
4207 case SIG_UNBLOCK:
4208 sigdelsetmask(&new_blocked, new_set);
4209 break;
4210 case SIG_SETMASK:
4211 new_blocked.sig[0] = new_set;
4212 break;
4213 default:
4214 return -EINVAL;
4215 }
4216
4217 set_current_blocked(&new_blocked);
4218 }
4219
4220 if (oset) {
4221 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4222 return -EFAULT;
4223 }
4224
4225 return 0;
4226 }
4227 #endif
4228
4229 #ifndef CONFIG_ODD_RT_SIGACTION
4230
4231 /**
4232  *  sys_rt_sigaction - alter an action taken by a process
4233  *  @sig: signal whose action is changed
4234  *  @act: new sigaction; @oact: used to save the previous sigaction
4235  *  @sigsetsize: size of sigset_t type
4236  */
4237 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4238 const struct sigaction __user *, act,
4239 struct sigaction __user *, oact,
4240 size_t, sigsetsize)
4241 {
4242 struct k_sigaction new_sa, old_sa;
4243 int ret;
4244
4245 /* XXX: Don't preclude handling different sized sigset_t's. */
4246 if (sigsetsize != sizeof(sigset_t))
4247 return -EINVAL;
4248
4249 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4250 return -EFAULT;
4251
4252 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4253 if (ret)
4254 return ret;
4255
4256 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4257 return -EFAULT;
4258
4259 return 0;
4260 }
4261 #ifdef CONFIG_COMPAT
4262 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4263 const struct compat_sigaction __user *, act,
4264 struct compat_sigaction __user *, oact,
4265 compat_size_t, sigsetsize)
4266 {
4267 struct k_sigaction new_ka, old_ka;
4268 #ifdef __ARCH_HAS_SA_RESTORER
4269 compat_uptr_t restorer;
4270 #endif
4271 int ret;
4272
4273 /* XXX: Don't preclude handling different sized sigset_t's. */
4274 if (sigsetsize != sizeof(compat_sigset_t))
4275 return -EINVAL;
4276
4277 if (act) {
4278 compat_uptr_t handler;
4279 ret = get_user(handler, &act->sa_handler);
4280 new_ka.sa.sa_handler = compat_ptr(handler);
4281 #ifdef __ARCH_HAS_SA_RESTORER
4282 ret |= get_user(restorer, &act->sa_restorer);
4283 new_ka.sa.sa_restorer = compat_ptr(restorer);
4284 #endif
4285 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4286 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4287 if (ret)
4288 return -EFAULT;
4289 }
4290
4291 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4292 if (!ret && oact) {
4293 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4294 &oact->sa_handler);
4295 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4296 sizeof(oact->sa_mask));
4297 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4298 #ifdef __ARCH_HAS_SA_RESTORER
4299 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4300 &oact->sa_restorer);
4301 #endif
4302 }
4303 return ret;
4304 }
4305 #endif
4306 #endif
4307
4308 #ifdef CONFIG_OLD_SIGACTION
4309 SYSCALL_DEFINE3(sigaction, int, sig,
4310 const struct old_sigaction __user *, act,
4311 struct old_sigaction __user *, oact)
4312 {
4313 struct k_sigaction new_ka, old_ka;
4314 int ret;
4315
4316 if (act) {
4317 old_sigset_t mask;
4318 if (!access_ok(act, sizeof(*act)) ||
4319 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4320 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4321 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4322 __get_user(mask, &act->sa_mask))
4323 return -EFAULT;
4324 #ifdef __ARCH_HAS_KA_RESTORER
4325 new_ka.ka_restorer = NULL;
4326 #endif
4327 siginitset(&new_ka.sa.sa_mask, mask);
4328 }
4329
4330 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4331
4332 if (!ret && oact) {
4333 if (!access_ok(oact, sizeof(*oact)) ||
4334 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4335 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4336 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4337 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4338 return -EFAULT;
4339 }
4340
4341 return ret;
4342 }
4343 #endif
4344 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4345 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4346 const struct compat_old_sigaction __user *, act,
4347 struct compat_old_sigaction __user *, oact)
4348 {
4349 struct k_sigaction new_ka, old_ka;
4350 int ret;
4351 compat_old_sigset_t mask;
4352 compat_uptr_t handler, restorer;
4353
4354 if (act) {
4355 if (!access_ok(act, sizeof(*act)) ||
4356 __get_user(handler, &act->sa_handler) ||
4357 __get_user(restorer, &act->sa_restorer) ||
4358 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4359 __get_user(mask, &act->sa_mask))
4360 return -EFAULT;
4361
4362 #ifdef __ARCH_HAS_KA_RESTORER
4363 new_ka.ka_restorer = NULL;
4364 #endif
4365 new_ka.sa.sa_handler = compat_ptr(handler);
4366 new_ka.sa.sa_restorer = compat_ptr(restorer);
4367 siginitset(&new_ka.sa.sa_mask, mask);
4368 }
4369
4370 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4371
4372 if (!ret && oact) {
4373 if (!access_ok(oact, sizeof(*oact)) ||
4374 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4375 &oact->sa_handler) ||
4376 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4377 &oact->sa_restorer) ||
4378 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4379 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4380 return -EFAULT;
4381 }
4382 return ret;
4383 }
4384 #endif
4385
4386 #ifdef CONFIG_SGETMASK_SYSCALL
4387
4388 /*
4389  * For backwards compatibility.  Functionality superseded by sigprocmask.
4390  */
4391 SYSCALL_DEFINE0(sgetmask)
4392 {
4393 /* SMP safe */
4394 return current->blocked.sig[0];
4395 }
4396
4397 SYSCALL_DEFINE1(ssetmask, int, newmask)
4398 {
4399 int old = current->blocked.sig[0];
4400 sigset_t newset;
4401
4402 siginitset(&newset, newmask);
4403 set_current_blocked(&newset);
4404
4405 return old;
4406 }
4407 #endif
4408
4409 #ifdef __ARCH_WANT_SYS_SIGNAL
4410 /*
4411  * For backwards compatibility.  Functionality superseded by sigaction.
4412  */
4413 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4414 {
4415 struct k_sigaction new_sa, old_sa;
4416 int ret;
4417
4418 new_sa.sa.sa_handler = handler;
4419 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4420 sigemptyset(&new_sa.sa.sa_mask);
4421
4422 ret = do_sigaction(sig, &new_sa, &old_sa);
4423
4424 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4425 }
4426 #endif
4427
4428 #ifdef __ARCH_WANT_SYS_PAUSE
4429
4430 SYSCALL_DEFINE0(pause)
4431 {
4432 while (!signal_pending(current)) {
4433 __set_current_state(TASK_INTERRUPTIBLE);
4434 schedule();
4435 }
4436 return -ERESTARTNOHAND;
4437 }
4438
4439 #endif
4440
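/*
 * Install *set as the blocked mask, sleep until a signal is pending,
 * then flag the saved mask for restoration on return to user space.
 */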
4441 static int sigsuspend(sigset_t *set)
4442 {
4443 current->saved_sigmask = current->blocked;
4444 set_current_blocked(set);
4445
4446 while (!signal_pending(current)) {
4447 __set_current_state(TASK_INTERRUPTIBLE);
4448 schedule();
4449 }
4450 set_restore_sigmask();
4451 return -ERESTARTNOHAND;
4452 }
4453
4454
4455 /*
4456  * sys_rt_sigsuspend - replace the signal mask with @unewset until a
4457  * signal is received.
4458  * @unewset: new signal mask value; @sigsetsize: size of sigset_t type
4459  */
4460 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4461 {
4462 sigset_t newset;
4463
4464 /* XXX: Don't preclude handling different sized sigset_t's. */
4465 if (sigsetsize != sizeof(sigset_t))
4466 return -EINVAL;
4467
4468 if (copy_from_user(&newset, unewset, sizeof(newset)))
4469 return -EFAULT;
4470 return sigsuspend(&newset);
4471 }
4472
4473 #ifdef CONFIG_COMPAT
4474 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4475 {
4476 sigset_t newset;
4477
4478 /* XXX: Don't preclude handling different sized sigset_t's. */
4479 if (sigsetsize != sizeof(sigset_t))
4480 return -EINVAL;
4481
4482 if (get_compat_sigset(&newset, unewset))
4483 return -EFAULT;
4484 return sigsuspend(&newset);
4485 }
4486 #endif
4487
4488 #ifdef CONFIG_OLD_SIGSUSPEND
4489 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4490 {
4491 sigset_t blocked;
4492 siginitset(&blocked, mask);
4493 return sigsuspend(&blocked);
4494 }
4495 #endif
4496 #ifdef CONFIG_OLD_SIGSUSPEND3
4497 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4498 {
4499 sigset_t blocked;
4500 siginitset(&blocked, mask);
4501 return sigsuspend(&blocked);
4502 }
4503 #endif
4504
4505 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4506 {
4507 return NULL;
4508 }
4509
4510 static inline void siginfo_buildtime_checks(void)
4511 {
4512 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4513
4514 /* siginfo_t and kernel_siginfo_t must agree on the offsets below. */
4515 #define CHECK_OFFSET(field) \
4516 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4517
4518 /* kill */
4519 CHECK_OFFSET(si_pid);
4520 CHECK_OFFSET(si_uid);
4521
4522 /* timer */
4523 CHECK_OFFSET(si_tid);
4524 CHECK_OFFSET(si_overrun);
4525 CHECK_OFFSET(si_value);
4526
4527 /* rt */
4528 CHECK_OFFSET(si_pid);
4529 CHECK_OFFSET(si_uid);
4530 CHECK_OFFSET(si_value);
4531
4532 /* sigchld */
4533 CHECK_OFFSET(si_pid);
4534 CHECK_OFFSET(si_uid);
4535 CHECK_OFFSET(si_status);
4536 CHECK_OFFSET(si_utime);
4537 CHECK_OFFSET(si_stime);
4538
4539 /* sigfault */
4540 CHECK_OFFSET(si_addr);
4541 CHECK_OFFSET(si_addr_lsb);
4542 CHECK_OFFSET(si_lower);
4543 CHECK_OFFSET(si_upper);
4544 CHECK_OFFSET(si_pkey);
4545
4546 /* sigpoll */
4547 CHECK_OFFSET(si_band);
4548 CHECK_OFFSET(si_fd);
4549
4550 /* sigsys */
4551 CHECK_OFFSET(si_call_addr);
4552 CHECK_OFFSET(si_syscall);
4553 CHECK_OFFSET(si_arch);
4554 #undef CHECK_OFFSET
4555
4556 /* usb asyncio */
4557 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4558 offsetof(struct siginfo, si_addr));
4559 if (sizeof(int) == sizeof(void __user *)) {
4560 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4561 sizeof(void __user *));
4562 } else {
4563 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4564 sizeof_field(struct siginfo, si_uid)) !=
4565 sizeof(void __user *));
4566 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4567 offsetof(struct siginfo, si_uid));
4568 }
4569 #ifdef CONFIG_COMPAT
4570 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4571 offsetof(struct compat_siginfo, si_addr));
4572 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4573 sizeof(compat_uptr_t));
4574 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4575 sizeof_field(struct siginfo, si_pid));
4576 #endif
4577 }
4578
4579 void __init signals_init(void)
4580 {
4581 siginfo_buildtime_checks();
4582
4583 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4584 }
4585
4586 #ifdef CONFIG_KGDB_KDB
4587 #include <linux/kdb.h>
4588
4589 /*
4590  * kdb_send_sig - allows kdb to send signals without exposing signal
4591  * internals.  It checks that the required locks are available before
4592  * calling the main signal code, to avoid kdb deadlocks.
4593  */
4594 void kdb_send_sig(struct task_struct *t, int sig)
4595 {
4596 static struct task_struct *kdb_prev_t;
4597 int new_t, ret;
4598 if (!spin_trylock(&t->sighand->siglock)) {
4599 kdb_printf("Can't do kill command now.\n"
4600 "The sigmask lock is held somewhere else in "
4601 "the kernel; try again later\n");
4602 return;
4603 }
4604 new_t = kdb_prev_t != t;
4605 kdb_prev_t = t;
4606 if (t->state != TASK_RUNNING && new_t) {
4607 spin_unlock(&t->sighand->siglock);
4608 kdb_printf("Process is not RUNNING, sending a signal from "
4609 "kdb risks deadlock\n"
4610 "on the run queue locks. "
4611 "The signal has _not_ been sent.\n"
4612 "Reissue the kill command if you want to risk "
4613 "the deadlock.\n");
4614 return;
4615 }
4616 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4617 spin_unlock(&t->sighand->siglock);
4618 if (ret)
4619 kdb_printf("Failed to deliver signal %d to process %d.\n",
4620 sig, t->pid);
4621 else
4622 kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
4623 }
4624 #endif