/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <linux/capability.h>
29 #include <asm/param.h>
30 #include <asm/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/siginfo.h>
35 * SLAB caches for signal bits.
38 static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behaves as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate	|
 *	|  SIGINT            |	terminate	|
 *	|  SIGQUIT           |	coredump 	|
 *	|  SIGILL            |	coredump 	|
 *	|  SIGTRAP           |	coredump 	|
 *	|  SIGABRT/SIGIOT    |	coredump 	|
 *	|  SIGBUS            |	coredump 	|
 *	|  SIGFPE            |	coredump 	|
 *	|  SIGKILL           |	terminate(+)	|
 *	|  SIGUSR1           |	terminate	|
 *	|  SIGSEGV           |	coredump 	|
 *	|  SIGUSR2           |	terminate	|
 *	|  SIGPIPE           |	terminate	|
 *	|  SIGALRM           |	terminate	|
 *	|  SIGTERM           |	terminate	|
 *	|  SIGCHLD           |	ignore   	|
 *	|  SIGCONT           |	ignore(*)	|
 *	|  SIGSTOP           |	stop(*)(+)  	|
 *	|  SIGTSTP           |	stop(*)  	|
 *	|  SIGTTIN           |	stop(*)  	|
 *	|  SIGTTOU           |	stop(*)  	|
 *	|  SIGURG            |	ignore   	|
 *	|  SIGXCPU           |	coredump 	|
 *	|  SIGXFSZ           |	coredump 	|
 *	|  SIGVTALRM         |	terminate	|
 *	|  SIGPROF           |	terminate	|
 *	|  SIGPOLL/SIGIO     |	terminate	|
 *	|  SIGSYS/SIGUNUSED  |	coredump 	|
 *	|  SIGSTKFLT         |	terminate	|
 *	|  SIGWINCH          |	ignore   	|
 *	|  SIGPWR            |	terminate	|
 *	|  SIGRTMIN-SIGRTMAX |	terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump	|
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignored the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
/*
 * SIGEMT is not universal; fold it into the masks only where it exists.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

/* M(sig) is the bit for signal number sig (1-based) in a mask word. */
#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
/* T(sig, mask): non-zero iff sig's bit is set in mask. */
#define T(sig, mask) (M(sig) & (mask))
/* Signals whose action cannot be changed at all (always fatal/stop). */
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

/* Signals whose SIG_DFL action stops the whole thread group. */
#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

/* Signals whose SIG_DFL action dumps core before killing the group. */
#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

/* Signals whose SIG_DFL action is to do nothing. */
#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

/* Stop signals and SIGCONT need the tasklist for group-wide effects. */
#define sig_needs_tasklist(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))

/* True if the task installed its own handler for signr. */
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

/* True if signr's SIG_DFL action would kill the group (not stop/ignore). */
#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
161 static int sig_ignored(struct task_struct *t, int sig)
163 void __user * handler;
166 * Tracers always want to know about signals..
168 if (t->ptrace & PT_PTRACED)
172 * Blocked signals are never ignored, since the
173 * signal handler may change by the time it is
176 if (sigismember(&t->blocked, sig))
179 /* Is it explicitly or implicitly ignored? */
180 handler = t->sighand->action[sig-1].sa.sa_handler;
181 return handler == SIG_IGN ||
182 (handler == SIG_DFL && sig_kernel_ignore(sig));
186 * Re-calculate pending state from the set of locally pending
187 * signals, globally pending signals, and blocked signals.
189 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
194 switch (_NSIG_WORDS) {
196 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
197 ready |= signal->sig[i] &~ blocked->sig[i];
200 case 4: ready = signal->sig[3] &~ blocked->sig[3];
201 ready |= signal->sig[2] &~ blocked->sig[2];
202 ready |= signal->sig[1] &~ blocked->sig[1];
203 ready |= signal->sig[0] &~ blocked->sig[0];
206 case 2: ready = signal->sig[1] &~ blocked->sig[1];
207 ready |= signal->sig[0] &~ blocked->sig[0];
210 case 1: ready = signal->sig[0] &~ blocked->sig[0];
215 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
217 fastcall void recalc_sigpending_tsk(struct task_struct *t)
219 if (t->signal->group_stop_count > 0 ||
221 PENDING(&t->pending, &t->blocked) ||
222 PENDING(&t->signal->shared_pending, &t->blocked))
223 set_tsk_thread_flag(t, TIF_SIGPENDING);
225 clear_tsk_thread_flag(t, TIF_SIGPENDING);
228 void recalc_sigpending(void)
230 recalc_sigpending_tsk(current);
233 /* Given the mask, find the first available signal that should be serviced. */
236 next_signal(struct sigpending *pending, sigset_t *mask)
238 unsigned long i, *s, *m, x;
241 s = pending->signal.sig;
243 switch (_NSIG_WORDS) {
245 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
246 if ((x = *s &~ *m) != 0) {
247 sig = ffz(~x) + i*_NSIG_BPW + 1;
252 case 2: if ((x = s[0] &~ m[0]) != 0)
254 else if ((x = s[1] &~ m[1]) != 0)
261 case 1: if ((x = *s &~ *m) != 0)
269 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
272 struct sigqueue *q = NULL;
274 atomic_inc(&t->user->sigpending);
275 if (override_rlimit ||
276 atomic_read(&t->user->sigpending) <=
277 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
278 q = kmem_cache_alloc(sigqueue_cachep, flags);
279 if (unlikely(q == NULL)) {
280 atomic_dec(&t->user->sigpending);
282 INIT_LIST_HEAD(&q->list);
284 q->user = get_uid(t->user);
289 static void __sigqueue_free(struct sigqueue *q)
291 if (q->flags & SIGQUEUE_PREALLOC)
293 atomic_dec(&q->user->sigpending);
295 kmem_cache_free(sigqueue_cachep, q);
298 static void flush_sigqueue(struct sigpending *queue)
302 sigemptyset(&queue->signal);
303 while (!list_empty(&queue->list)) {
304 q = list_entry(queue->list.next, struct sigqueue , list);
305 list_del_init(&q->list);
311 * Flush all pending signals for a task.
313 void flush_signals(struct task_struct *t)
317 spin_lock_irqsave(&t->sighand->siglock, flags);
318 clear_tsk_thread_flag(t,TIF_SIGPENDING);
319 flush_sigqueue(&t->pending);
320 flush_sigqueue(&t->signal->shared_pending);
321 spin_unlock_irqrestore(&t->sighand->siglock, flags);
325 * This function expects the tasklist_lock write-locked.
327 void __exit_signal(struct task_struct *tsk)
329 struct signal_struct *sig = tsk->signal;
330 struct sighand_struct *sighand;
333 BUG_ON(!atomic_read(&sig->count));
336 sighand = rcu_dereference(tsk->sighand);
337 spin_lock(&sighand->siglock);
339 posix_cpu_timers_exit(tsk);
340 if (atomic_dec_and_test(&sig->count))
341 posix_cpu_timers_exit_group(tsk);
344 * If there is any task waiting for the group exit
347 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
348 wake_up_process(sig->group_exit_task);
349 sig->group_exit_task = NULL;
351 if (tsk == sig->curr_target)
352 sig->curr_target = next_thread(tsk);
354 * Accumulate here the counters for all threads but the
355 * group leader as they die, so they can be added into
356 * the process-wide totals when those are taken.
357 * The group leader stays around as a zombie as long
358 * as there are other threads. When it gets reaped,
359 * the exit.c code will add its counts into these totals.
360 * We won't ever get here for the group leader, since it
361 * will have been the last reference on the signal_struct.
363 sig->utime = cputime_add(sig->utime, tsk->utime);
364 sig->stime = cputime_add(sig->stime, tsk->stime);
365 sig->min_flt += tsk->min_flt;
366 sig->maj_flt += tsk->maj_flt;
367 sig->nvcsw += tsk->nvcsw;
368 sig->nivcsw += tsk->nivcsw;
369 sig->sched_time += tsk->sched_time;
370 sig = NULL; /* Marker for below. */
374 cleanup_sighand(tsk);
375 spin_unlock(&sighand->siglock);
378 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
379 flush_sigqueue(&tsk->pending);
381 flush_sigqueue(&sig->shared_pending);
382 __cleanup_signal(sig);
387 * Flush all handlers for a task.
391 flush_signal_handlers(struct task_struct *t, int force_default)
394 struct k_sigaction *ka = &t->sighand->action[0];
395 for (i = _NSIG ; i != 0 ; i--) {
396 if (force_default || ka->sa.sa_handler != SIG_IGN)
397 ka->sa.sa_handler = SIG_DFL;
399 sigemptyset(&ka->sa.sa_mask);
405 /* Notify the system that a driver wants to block all signals for this
406 * process, and wants to be notified if any signals at all were to be
407 * sent/acted upon. If the notifier routine returns non-zero, then the
408 * signal will be acted upon after all. If the notifier routine returns 0,
409 * then then signal will be blocked. Only one block per process is
410 * allowed. priv is a pointer to private data that the notifier routine
411 * can use to determine if the signal should be blocked or not. */
414 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
418 spin_lock_irqsave(¤t->sighand->siglock, flags);
419 current->notifier_mask = mask;
420 current->notifier_data = priv;
421 current->notifier = notifier;
422 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
425 /* Notify the system that blocking has ended. */
428 unblock_all_signals(void)
432 spin_lock_irqsave(¤t->sighand->siglock, flags);
433 current->notifier = NULL;
434 current->notifier_data = NULL;
436 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
439 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
441 struct sigqueue *q, *first = NULL;
442 int still_pending = 0;
444 if (unlikely(!sigismember(&list->signal, sig)))
448 * Collect the siginfo appropriate to this signal. Check if
449 * there is another siginfo for the same signal.
451 list_for_each_entry(q, &list->list, list) {
452 if (q->info.si_signo == sig) {
461 list_del_init(&first->list);
462 copy_siginfo(info, &first->info);
463 __sigqueue_free(first);
465 sigdelset(&list->signal, sig);
468 /* Ok, it wasn't in the queue. This must be
469 a fast-pathed signal or we must have been
470 out of queue space. So zero out the info.
472 sigdelset(&list->signal, sig);
473 info->si_signo = sig;
482 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
487 sig = next_signal(pending, mask);
489 if (current->notifier) {
490 if (sigismember(current->notifier_mask, sig)) {
491 if (!(current->notifier)(current->notifier_data)) {
492 clear_thread_flag(TIF_SIGPENDING);
498 if (!collect_signal(sig, pending, info))
508 * Dequeue a signal and return the element to the caller, which is
509 * expected to free it.
511 * All callers have to hold the siglock.
513 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
515 int signr = __dequeue_signal(&tsk->pending, mask, info);
517 signr = __dequeue_signal(&tsk->signal->shared_pending,
519 if (signr && unlikely(sig_kernel_stop(signr))) {
521 * Set a marker that we have dequeued a stop signal. Our
522 * caller might release the siglock and then the pending
523 * stop signal it is about to process is no longer in the
524 * pending bitmasks, but must still be cleared by a SIGCONT
525 * (and overruled by a SIGKILL). So those cases clear this
526 * shared flag after we've set it. Note that this flag may
527 * remain set after the signal we return is ignored or
528 * handled. That doesn't matter because its only purpose
529 * is to alert stop-signal processing code when another
530 * processor has come along and cleared the flag.
532 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
533 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
536 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
537 info->si_sys_private){
539 * Release the siglock to ensure proper locking order
540 * of timer locks outside of siglocks. Note, we leave
541 * irqs disabled here, since the posix-timers code is
542 * about to disable them again anyway.
544 spin_unlock(&tsk->sighand->siglock);
545 do_schedule_next_timer(info);
546 spin_lock(&tsk->sighand->siglock);
552 * Tell a process that it has a new active signal..
554 * NOTE! we rely on the previous spin_lock to
555 * lock interrupts for us! We can only be called with
556 * "siglock" held, and the local interrupt must
557 * have been disabled when that got acquired!
559 * No need to set need_resched since signal event passing
560 * goes through ->blocked
562 void signal_wake_up(struct task_struct *t, int resume)
566 set_tsk_thread_flag(t, TIF_SIGPENDING);
569 * For SIGKILL, we want to wake it up in the stopped/traced case.
570 * We don't check t->state here because there is a race with it
571 * executing another processor and just now entering stopped state.
572 * By using wake_up_state, we ensure the process will wake up and
573 * handle its death signal.
575 mask = TASK_INTERRUPTIBLE;
577 mask |= TASK_STOPPED | TASK_TRACED;
578 if (!wake_up_state(t, mask))
583 * Remove signals in mask from the pending set and queue.
584 * Returns 1 if any signals were found.
586 * All callers must be holding the siglock.
588 * This version takes a sigset mask and looks at all signals,
589 * not just those in the first mask word.
591 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
593 struct sigqueue *q, *n;
596 sigandsets(&m, mask, &s->signal);
597 if (sigisemptyset(&m))
600 signandsets(&s->signal, &s->signal, mask);
601 list_for_each_entry_safe(q, n, &s->list, list) {
602 if (sigismember(mask, q->info.si_signo)) {
603 list_del_init(&q->list);
610 * Remove signals in mask from the pending set and queue.
611 * Returns 1 if any signals were found.
613 * All callers must be holding the siglock.
615 static int rm_from_queue(unsigned long mask, struct sigpending *s)
617 struct sigqueue *q, *n;
619 if (!sigtestsetmask(&s->signal, mask))
622 sigdelsetmask(&s->signal, mask);
623 list_for_each_entry_safe(q, n, &s->list, list) {
624 if (q->info.si_signo < SIGRTMIN &&
625 (mask & sigmask(q->info.si_signo))) {
626 list_del_init(&q->list);
634 * Bad permissions for sending the signal
636 static int check_kill_permission(int sig, struct siginfo *info,
637 struct task_struct *t)
640 if (!valid_signal(sig))
643 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
644 && ((sig != SIGCONT) ||
645 (current->signal->session != t->signal->session))
646 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
647 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
648 && !capable(CAP_KILL))
651 error = security_task_kill(t, info, sig);
653 audit_signal_info(sig, t); /* Let audit system see the signal */
/* forward declaration */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);
663 * Handle magic process-wide effects of stop/continue signals.
664 * Unlike the signal actions, these happen immediately at signal-generation
665 * time regardless of blocking, ignoring, or handling. This does the
666 * actual continuing for SIGCONT, but not the actual stopping for stop
667 * signals. The process stop is done as a signal action for SIG_DFL.
669 static void handle_stop_signal(int sig, struct task_struct *p)
671 struct task_struct *t;
673 if (p->signal->flags & SIGNAL_GROUP_EXIT)
675 * The process is in the middle of dying already.
679 if (sig_kernel_stop(sig)) {
681 * This is a stop signal. Remove SIGCONT from all queues.
683 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
686 rm_from_queue(sigmask(SIGCONT), &t->pending);
689 } else if (sig == SIGCONT) {
691 * Remove all stop signals from all queues,
692 * and wake all threads.
694 if (unlikely(p->signal->group_stop_count > 0)) {
696 * There was a group stop in progress. We'll
697 * pretend it finished before we got here. We are
698 * obliged to report it to the parent: if the
699 * SIGSTOP happened "after" this SIGCONT, then it
700 * would have cleared this pending SIGCONT. If it
701 * happened "before" this SIGCONT, then the parent
702 * got the SIGCHLD about the stop finishing before
703 * the continue happened. We do the notification
704 * now, and it's as if the stop had finished and
705 * the SIGCHLD was pending on entry to this kill.
707 p->signal->group_stop_count = 0;
708 p->signal->flags = SIGNAL_STOP_CONTINUED;
709 spin_unlock(&p->sighand->siglock);
710 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
711 spin_lock(&p->sighand->siglock);
713 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
717 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
720 * If there is a handler for SIGCONT, we must make
721 * sure that no thread returns to user mode before
722 * we post the signal, in case it was the only
723 * thread eligible to run the signal handler--then
724 * it must not do anything between resuming and
725 * running the handler. With the TIF_SIGPENDING
726 * flag set, the thread will pause and acquire the
727 * siglock that we hold now and until we've queued
728 * the pending signal.
730 * Wake up the stopped thread _after_ setting
733 state = TASK_STOPPED;
734 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
735 set_tsk_thread_flag(t, TIF_SIGPENDING);
736 state |= TASK_INTERRUPTIBLE;
738 wake_up_state(t, state);
743 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
745 * We were in fact stopped, and are now continued.
746 * Notify the parent with CLD_CONTINUED.
748 p->signal->flags = SIGNAL_STOP_CONTINUED;
749 p->signal->group_exit_code = 0;
750 spin_unlock(&p->sighand->siglock);
751 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
752 spin_lock(&p->sighand->siglock);
755 * We are not stopped, but there could be a stop
756 * signal in the middle of being processed after
757 * being removed from the queue. Clear that too.
759 p->signal->flags = 0;
761 } else if (sig == SIGKILL) {
763 * Make sure that any pending stop signal already dequeued
764 * is undone by the wakeup for SIGKILL.
766 p->signal->flags = 0;
770 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
771 struct sigpending *signals)
773 struct sigqueue * q = NULL;
777 * fast-pathed signals for kernel-internal things like SIGSTOP
780 if (info == SEND_SIG_FORCED)
783 /* Real-time signals must be queued if sent by sigqueue, or
784 some other real-time mechanism. It is implementation
785 defined whether kill() does so. We attempt to do so, on
786 the principle of least surprise, but since kill is not
787 allowed to fail with EAGAIN when low on memory we just
788 make sure at least one signal gets delivered and don't
789 pass on the info struct. */
791 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
792 (is_si_special(info) ||
793 info->si_code >= 0)));
795 list_add_tail(&q->list, &signals->list);
796 switch ((unsigned long) info) {
797 case (unsigned long) SEND_SIG_NOINFO:
798 q->info.si_signo = sig;
799 q->info.si_errno = 0;
800 q->info.si_code = SI_USER;
801 q->info.si_pid = current->pid;
802 q->info.si_uid = current->uid;
804 case (unsigned long) SEND_SIG_PRIV:
805 q->info.si_signo = sig;
806 q->info.si_errno = 0;
807 q->info.si_code = SI_KERNEL;
812 copy_siginfo(&q->info, info);
815 } else if (!is_si_special(info)) {
816 if (sig >= SIGRTMIN && info->si_code != SI_USER)
818 * Queue overflow, abort. We may abort if the signal was rt
819 * and sent by user using something other than kill().
825 sigaddset(&signals->signal, sig);
/* Non-rt signal already pending in sigptr: coalesce instead of queueing. */
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
834 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
838 if (!irqs_disabled())
840 assert_spin_locked(&t->sighand->siglock);
842 /* Short-circuit ignored signals. */
843 if (sig_ignored(t, sig))
846 /* Support queueing exactly one non-rt signal, so that we
847 can get more detailed information about the cause of
849 if (LEGACY_QUEUE(&t->pending, sig))
852 ret = send_signal(sig, info, t, &t->pending);
853 if (!ret && !sigismember(&t->blocked, sig))
854 signal_wake_up(t, sig == SIGKILL);
860 * Force a signal that the process can't ignore: if necessary
861 * we unblock the signal and change any SIG_IGN to SIG_DFL.
865 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
867 unsigned long int flags;
870 spin_lock_irqsave(&t->sighand->siglock, flags);
871 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
872 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
874 if (sigismember(&t->blocked, sig)) {
875 sigdelset(&t->blocked, sig);
877 recalc_sigpending_tsk(t);
878 ret = specific_send_sig_info(sig, info, t);
879 spin_unlock_irqrestore(&t->sighand->siglock, flags);
885 force_sig_specific(int sig, struct task_struct *t)
887 force_sig_info(sig, SEND_SIG_FORCED, t);
891 * Test if P wants to take SIG. After we've checked all threads with this,
892 * it's equivalent to finding no threads not blocking SIG. Any threads not
893 * blocking SIG were ruled out because they are not running and already
894 * have pending signals. Such threads will dequeue from the shared queue
895 * as soon as they're available, so putting the signal on the shared queue
896 * will be equivalent to sending it to one such thread.
898 static inline int wants_signal(int sig, struct task_struct *p)
900 if (sigismember(&p->blocked, sig))
902 if (p->flags & PF_EXITING)
906 if (p->state & (TASK_STOPPED | TASK_TRACED))
908 return task_curr(p) || !signal_pending(p);
912 __group_complete_signal(int sig, struct task_struct *p)
914 struct task_struct *t;
917 * Now find a thread we can wake up to take the signal off the queue.
919 * If the main thread wants the signal, it gets first crack.
920 * Probably the least surprising to the average bear.
922 if (wants_signal(sig, p))
924 else if (thread_group_empty(p))
926 * There is just one thread and it does not need to be woken.
927 * It will dequeue unblocked signals before it runs again.
932 * Otherwise try to find a suitable thread.
934 t = p->signal->curr_target;
936 /* restart balancing at this thread */
937 t = p->signal->curr_target = p;
938 BUG_ON(t->tgid != p->tgid);
940 while (!wants_signal(sig, t)) {
942 if (t == p->signal->curr_target)
944 * No thread needs to be woken.
945 * Any eligible threads will see
946 * the signal in the queue soon.
950 p->signal->curr_target = t;
954 * Found a killable thread. If the signal will be fatal,
955 * then start taking the whole group down immediately.
957 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
958 !sigismember(&t->real_blocked, sig) &&
959 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
961 * This signal will be fatal to the whole group.
963 if (!sig_kernel_coredump(sig)) {
965 * Start a group exit and wake everybody up.
966 * This way we don't have other threads
967 * running and doing things after a slower
968 * thread has the fatal signal pending.
970 p->signal->flags = SIGNAL_GROUP_EXIT;
971 p->signal->group_exit_code = sig;
972 p->signal->group_stop_count = 0;
975 sigaddset(&t->pending.signal, SIGKILL);
976 signal_wake_up(t, 1);
983 * There will be a core dump. We make all threads other
984 * than the chosen one go into a group stop so that nothing
985 * happens until it gets scheduled, takes the signal off
986 * the shared queue, and does the core dump. This is a
987 * little more complicated than strictly necessary, but it
988 * keeps the signal state that winds up in the core dump
989 * unchanged from the death state, e.g. which thread had
990 * the core-dump signal unblocked.
992 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
993 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
994 p->signal->group_stop_count = 0;
995 p->signal->group_exit_task = t;
998 p->signal->group_stop_count++;
999 signal_wake_up(t, 0);
1002 wake_up_process(p->signal->group_exit_task);
1007 * The signal is already in the shared-pending queue.
1008 * Tell the chosen thread to wake up and dequeue it.
1010 signal_wake_up(t, sig == SIGKILL);
1015 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1019 assert_spin_locked(&p->sighand->siglock);
1020 handle_stop_signal(sig, p);
1022 /* Short-circuit ignored signals. */
1023 if (sig_ignored(p, sig))
1026 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1027 /* This is a non-RT signal and we already have one queued. */
1031 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1032 * We always use the shared queue for process-wide signals,
1033 * to avoid several races.
1035 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1039 __group_complete_signal(sig, p);
1044 * Nuke all other threads in the group.
1046 void zap_other_threads(struct task_struct *p)
1048 struct task_struct *t;
1050 p->signal->flags = SIGNAL_GROUP_EXIT;
1051 p->signal->group_stop_count = 0;
1053 if (thread_group_empty(p))
1056 for (t = next_thread(p); t != p; t = next_thread(t)) {
1058 * Don't bother with already dead threads
1064 * We don't want to notify the parent, since we are
1065 * killed as part of a thread group due to another
1066 * thread doing an execve() or similar. So set the
1067 * exit signal to -1 to allow immediate reaping of
1068 * the process. But don't detach the thread group
1071 if (t != p->group_leader)
1072 t->exit_signal = -1;
1074 /* SIGKILL will be handled before any pending SIGSTOP */
1075 sigaddset(&t->pending.signal, SIGKILL);
1076 signal_wake_up(t, 1);
1081 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1083 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1085 struct sighand_struct *sighand;
1088 sighand = rcu_dereference(tsk->sighand);
1089 if (unlikely(sighand == NULL))
1092 spin_lock_irqsave(&sighand->siglock, *flags);
1093 if (likely(sighand == tsk->sighand))
1095 spin_unlock_irqrestore(&sighand->siglock, *flags);
1101 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1103 unsigned long flags;
1106 ret = check_kill_permission(sig, info, p);
1110 if (lock_task_sighand(p, &flags)) {
1111 ret = __group_send_sig_info(sig, info, p);
1112 unlock_task_sighand(p, &flags);
1120 * kill_pg_info() sends a signal to a process group: this is what the tty
1121 * control characters do (^C, ^Z etc)
1124 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1126 struct task_struct *p = NULL;
1127 int retval, success;
1134 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1135 int err = group_send_sig_info(sig, info, p);
1138 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1139 return success ? 0 : retval;
1143 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1147 read_lock(&tasklist_lock);
1148 retval = __kill_pg_info(sig, info, pgrp);
1149 read_unlock(&tasklist_lock);
1155 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1158 int acquired_tasklist_lock = 0;
1159 struct task_struct *p;
1162 if (unlikely(sig_needs_tasklist(sig))) {
1163 read_lock(&tasklist_lock);
1164 acquired_tasklist_lock = 1;
1166 p = find_task_by_pid(pid);
1169 error = group_send_sig_info(sig, info, p);
1170 if (unlikely(acquired_tasklist_lock))
1171 read_unlock(&tasklist_lock);
1176 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1177 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1178 uid_t uid, uid_t euid)
1181 struct task_struct *p;
1183 if (!valid_signal(sig))
1186 read_lock(&tasklist_lock);
1187 p = find_task_by_pid(pid);
1192 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1193 && (euid != p->suid) && (euid != p->uid)
1194 && (uid != p->suid) && (uid != p->uid)) {
1198 if (sig && p->sighand) {
1199 unsigned long flags;
1200 spin_lock_irqsave(&p->sighand->siglock, flags);
1201 ret = __group_send_sig_info(sig, info, p);
1202 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1205 read_unlock(&tasklist_lock);
1208 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1211 * kill_something_info() interprets pid in interesting ways just like kill(2).
1213 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1214 * is probably wrong. Should make it like BSD or SYSV.
1217 static int kill_something_info(int sig, struct siginfo *info, int pid)
1220 return kill_pg_info(sig, info, process_group(current));
1221 } else if (pid == -1) {
1222 int retval = 0, count = 0;
1223 struct task_struct * p;
1225 read_lock(&tasklist_lock);
1226 for_each_process(p) {
1227 if (p->pid > 1 && p->tgid != current->tgid) {
1228 int err = group_send_sig_info(sig, info, p);
1234 read_unlock(&tasklist_lock);
1235 return count ? retval : -ESRCH;
1236 } else if (pid < 0) {
1237 return kill_pg_info(sig, info, -pid);
1239 return kill_proc_info(sig, info, pid);
1244 * These are for backward compatibility with the rest of the kernel source.
1248 * These two are the most common entry points. They send a signal
1249 * just to the specific thread.
1252 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1255 unsigned long flags;
1258 * Make sure legacy kernel users don't send in bad values
1259 * (normal paths check this in check_kill_permission).
1261 if (!valid_signal(sig))
1265 * We need the tasklist lock even for the specific
1266 * thread case (when we don't need to follow the group
1267 * lists) in order to avoid races with "p->sighand"
1268 * going away or changing from under us.
1270 read_lock(&tasklist_lock);
1271 spin_lock_irqsave(&p->sighand->siglock, flags);
1272 ret = specific_send_sig_info(sig, info, p);
1273 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1274 read_unlock(&tasklist_lock);
/* Map the legacy "priv" flag to the special siginfo sentinels. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
/* Legacy entry point: send sig to a specific thread, priv selects SI_KERNEL. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1288 * This is the entry point for "process-wide" signals.
1289 * They will go to an appropriate thread in the thread group.
/* tasklist_lock keeps p's thread group stable while a target is picked. */
1292 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1295 read_lock(&tasklist_lock);
1296 ret = group_send_sig_info(sig, info, p);
1297 read_unlock(&tasklist_lock);
/* force_sig() - send sig to p even if it is blocked or ignored. */
1302 force_sig(int sig, struct task_struct *p)
1304 force_sig_info(sig, SEND_SIG_PRIV, p);
1308 * When things go south during signal handling, we
1309 * will force a SIGSEGV. And if the signal that caused
1310 * the problem was already a SIGSEGV, we'll want to
1311 * make sure we don't even try to deliver the signal..
1314 force_sigsegv(int sig, struct task_struct *p)
1316 if (sig == SIGSEGV) {
1317 unsigned long flags;
/* Reset a SIGSEGV handler to SIG_DFL so the forced signal kills the
 * task instead of re-entering the faulting handler. */
1318 spin_lock_irqsave(&p->sighand->siglock, flags);
1319 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1320 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1322 force_sig(SIGSEGV, p);
/* kill_pg() / kill_proc() - legacy wrappers taking a priv flag instead of
 * an explicit siginfo (see __si_special above). */
1327 kill_pg(pid_t pgrp, int sig, int priv)
1329 return kill_pg_info(sig, __si_special(priv), pgrp);
1333 kill_proc(pid_t pid, int sig, int priv)
1335 return kill_proc_info(sig, __si_special(priv), pid);
1339 * These functions support sending signals using preallocated sigqueue
1340 * structures. This is needed "because realtime applications cannot
1341 * afford to lose notifications of asynchronous events, like timer
1342 * expirations or I/O completions". In the case of Posix Timers
1343 * we allocate the sigqueue structure from the timer_create. If this
1344 * allocation fails we are able to report the failure to the application
1345 * with an EAGAIN error.
/*
 * sigqueue_alloc() - preallocate one sigqueue entry for later delivery
 * (POSIX timers). SIGQUEUE_PREALLOC marks it as caller-owned so the
 * normal delivery path will not free it. Presumably returns NULL on
 * allocation failure — declaration/return lines elided; TODO confirm.
 */
1348 struct sigqueue *sigqueue_alloc(void)
1352 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1353 q->flags |= SIGQUEUE_PREALLOC;
1357 void sigqueue_free(struct sigqueue *q)
1359 unsigned long flags;
1360 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1362 * If the signal is still pending remove it from the
1365 if (unlikely(!list_empty(&q->list))) {
1366 spinlock_t *lock = ¤t->sighand->siglock;
1367 read_lock(&tasklist_lock);
1368 spin_lock_irqsave(lock, flags);
1369 if (!list_empty(&q->list))
1370 list_del_init(&q->list);
1371 spin_unlock_irqrestore(lock, flags);
1372 read_unlock(&tasklist_lock);
1374 q->flags &= ~SIGQUEUE_PREALLOC;
/*
 * send_sigqueue() - deliver a preallocated sigqueue entry to one specific
 * thread p without tasklist_lock, relying on the RCU-delayed sighand
 * destroy. Returns -1 when p is exiting so posix_timer_event() can
 * redirect to the group leader.
 * NOTE(review): excerpt is elided — the rcu_read_lock()/unlock pairs,
 * the "ret" declaration and the out/error labels are not visible here.
 */
1379 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1381 unsigned long flags;
1383 struct sighand_struct *sh;
1385 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1388 * The rcu based delayed sighand destroy makes it possible to
1389 * run this without tasklist lock held. The task struct itself
1390 * cannot go away as create_timer did get_task_struct().
1392 * We return -1, when the task is marked exiting, so
1393 * posix_timer_event can redirect it to the group leader
1397 if (unlikely(p->flags & PF_EXITING)) {
1403 sh = rcu_dereference(p->sighand);
1405 spin_lock_irqsave(&sh->siglock, flags);
/* sighand may have been replaced between rcu_dereference and taking the
 * lock; bail out and let the caller retry. */
1406 if (p->sighand != sh) {
1407 /* We raced with exec() in a multithreaded process... */
1408 spin_unlock_irqrestore(&sh->siglock, flags);
1413 * We do the check here again to handle the following scenario:
1418 * interrupt exit code running
1420 * lock sighand->siglock
1421 * unlock sighand->siglock
1423 * add(tsk->pending) flush_sigqueue(tsk->pending)
1427 if (unlikely(p->flags & PF_EXITING)) {
/* Already queued: an SI_TIMER entry just bumps its overrun count. */
1432 if (unlikely(!list_empty(&q->list))) {
1434 * If an SI_TIMER entry is already queue just increment
1435 * the overrun count.
1437 if (q->info.si_code != SI_TIMER)
1439 q->info.si_overrun++;
1442 /* Short-circuit ignored signals. */
1443 if (sig_ignored(p, sig)) {
1448 list_add_tail(&q->list, &p->pending.list);
1449 sigaddset(&p->pending.signal, sig);
/* Wake the target only if the signal is not blocked; SIGKILL forces a
 * wakeup even out of TASK_STOPPED/TRACED. */
1450 if (!sigismember(&p->blocked, sig))
1451 signal_wake_up(p, sig == SIGKILL);
1454 spin_unlock_irqrestore(&sh->siglock, flags);
/*
 * send_group_sigqueue() - deliver a preallocated sigqueue entry to the
 * whole thread group of p via the shared pending queue; the target
 * thread is chosen by __group_complete_signal().
 */
1462 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1464 unsigned long flags;
1467 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1469 read_lock(&tasklist_lock);
1470 /* Since it_lock is held, p->sighand cannot be NULL. */
1471 spin_lock_irqsave(&p->sighand->siglock, flags);
/* Stop/continue bookkeeping must run before the signal is queued. */
1472 handle_stop_signal(sig, p);
1474 /* Short-circuit ignored signals. */
1475 if (sig_ignored(p, sig)) {
1480 if (unlikely(!list_empty(&q->list))) {
1482 * If an SI_TIMER entry is already queue just increment
1483 * the overrun count. Other uses should not try to
1484 * send the signal multiple times.
1486 if (q->info.si_code != SI_TIMER)
1488 q->info.si_overrun++;
1493 * Put this signal on the shared-pending queue.
1494 * We always use the shared queue for process-wide signals,
1495 * to avoid several races.
1497 list_add_tail(&q->list, &p->signal->shared_pending.list);
1498 sigaddset(&p->signal->shared_pending.signal, sig);
1500 __group_complete_signal(sig, p);
1502 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1503 read_unlock(&tasklist_lock);
1508 * Wake up any threads in the parent blocked in wait* syscalls.
/* Sync wakeup: the waiter is expected to run soon; avoid a CPU migration. */
1510 static inline void __wake_up_parent(struct task_struct *p,
1511 struct task_struct *parent)
1513 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1517 * Let a parent know about the death of a child.
1518 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
/*
 * do_notify_parent() - build a siginfo describing tsk's exit, send it to
 * the parent (usually as SIGCHLD), and wake any wait4() sleepers. Also
 * implements the POSIX SIG_IGN / SA_NOCLDWAIT auto-reap semantics by
 * setting tsk->exit_signal = -1 for do_exit() to act on.
 */
1521 void do_notify_parent(struct task_struct *tsk, int sig)
1523 struct siginfo info;
1524 unsigned long flags;
1525 struct sighand_struct *psig;
1529 /* do_notify_parent_cldstop should have been called instead. */
1530 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1532 BUG_ON(!tsk->ptrace &&
1533 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1535 info.si_signo = sig;
1537 info.si_pid = tsk->pid;
1538 info.si_uid = tsk->uid;
1540 /* FIXME: find out whether or not this is supposed to be c*time. */
1541 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1542 tsk->signal->utime));
1543 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1544 tsk->signal->stime));
/* Decode exit_code: 0x80 set = dumped core, low 7 bits = killing signal,
 * otherwise a normal exit whose status lives in the high byte. */
1546 info.si_status = tsk->exit_code & 0x7f;
1547 if (tsk->exit_code & 0x80)
1548 info.si_code = CLD_DUMPED;
1549 else if (tsk->exit_code & 0x7f)
1550 info.si_code = CLD_KILLED;
1552 info.si_code = CLD_EXITED;
1553 info.si_status = tsk->exit_code >> 8;
1556 psig = tsk->parent->sighand;
1557 spin_lock_irqsave(&psig->siglock, flags);
1558 if (!tsk->ptrace && sig == SIGCHLD &&
1559 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1560 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1562 * We are exiting and our parent doesn't care. POSIX.1
1563 * defines special semantics for setting SIGCHLD to SIG_IGN
1564 * or setting the SA_NOCLDWAIT flag: we should be reaped
1565 * automatically and not left for our parent's wait4 call.
1566 * Rather than having the parent do it as a magic kind of
1567 * signal handler, we just set this to tell do_exit that we
1568 * can be cleaned up without becoming a zombie. Note that
1569 * we still call __wake_up_parent in this case, because a
1570 * blocked sys_wait4 might now return -ECHILD.
1572 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1573 * is implementation-defined: we do (if you don't want
1574 * it, just use SIG_IGN instead).
1576 tsk->exit_signal = -1;
1577 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1580 if (valid_signal(sig) && sig > 0)
1581 __group_send_sig_info(sig, &info, tsk->parent);
/* Wake wait4() even when no signal was sent (e.g. the auto-reap case). */
1582 __wake_up_parent(tsk, tsk->parent);
1583 spin_unlock_irqrestore(&psig->siglock, flags);
/*
 * do_notify_parent_cldstop() - notify the (ptrace or real) parent of a
 * stop/continue/trap state change via SIGCHLD, honoring SA_NOCLDSTOP,
 * and wake its wait4() sleepers unconditionally.
 * NOTE(review): excerpt is elided — the switch on "why" (its case
 * labels) and the to_self branch structure are not fully visible.
 */
1586 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1588 struct siginfo info;
1589 unsigned long flags;
1590 struct task_struct *parent;
1591 struct sighand_struct *sighand;
/* to_self: report this thread to its ptrace parent; otherwise report the
 * group leader to the real parent — TODO confirm against full source. */
1594 parent = tsk->parent;
1596 tsk = tsk->group_leader;
1597 parent = tsk->real_parent;
1600 info.si_signo = SIGCHLD;
1602 info.si_pid = tsk->pid;
1603 info.si_uid = tsk->uid;
1605 /* FIXME: find out whether or not this is supposed to be c*time. */
1606 info.si_utime = cputime_to_jiffies(tsk->utime);
1607 info.si_stime = cputime_to_jiffies(tsk->stime);
1612 info.si_status = SIGCONT;
1615 info.si_status = tsk->signal->group_exit_code & 0x7f;
1618 info.si_status = tsk->exit_code & 0x7f;
1624 sighand = parent->sighand;
1625 spin_lock_irqsave(&sighand->siglock, flags);
/* SIGCHLD is suppressed when ignored or when SA_NOCLDSTOP is set... */
1626 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1627 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1628 __group_send_sig_info(SIGCHLD, &info, parent);
1630 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1632 __wake_up_parent(tsk, parent);
1633 spin_unlock_irqrestore(&sighand->siglock, flags);
1637 * This must be called with current->sighand->siglock held.
1639 * This should be the path for all ptrace stops.
1640 * We always set current->last_siginfo while stopped here.
1641 * That makes it a way to test a stopped process for
1642 * being ptrace-stopped vs being job-control-stopped.
1644 * If we actually decide not to stop at all because the tracer is gone,
1645 * we leave nostop_code in current->exit_code.
/* NOTE(review): "¤t" below is mojibake for "&current" (a mangled
 * "&curren;" HTML entity) — restore when fixing the extraction. */
1647 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1650 * If there is a group stop in progress,
1651 * we must participate in the bookkeeping.
1653 if (current->signal->group_stop_count > 0)
1654 --current->signal->group_stop_count;
1656 current->last_siginfo = info;
1657 current->exit_code = exit_code;
1659 /* Let the debugger run. */
1660 set_current_state(TASK_TRACED);
1661 spin_unlock_irq(¤t->sighand->siglock);
1662 read_lock(&tasklist_lock);
/* Only actually stop when a live tracer can observe it; the last two
 * conditions guard the self-attach / group-exit corner — the exact
 * intent is subtle, confirm against full source before touching. */
1663 if (likely(current->ptrace & PT_PTRACED) &&
1664 likely(current->parent != current->real_parent ||
1665 !(current->ptrace & PT_ATTACHED)) &&
1666 (likely(current->parent->signal != current->signal) ||
1667 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1668 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1669 read_unlock(&tasklist_lock);
1673 * By the time we got the lock, our tracer went away.
1676 read_unlock(&tasklist_lock);
1677 set_current_state(TASK_RUNNING);
1678 current->exit_code = nostop_code;
1682 * We are back. Now reacquire the siglock before touching
1683 * last_siginfo, so that we are sure to have synchronized with
1684 * any signal-sending on another CPU that wants to examine it.
1686 spin_lock_irq(¤t->sighand->siglock);
1687 current->last_siginfo = NULL;
1690 * Queued signals ignored us while we were stopped for tracing.
1691 * So check for any that we should take before resuming user mode.
1693 recalc_sigpending();
1696 void ptrace_notify(int exit_code)
1700 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1702 memset(&info, 0, sizeof info);
1703 info.si_signo = SIGTRAP;
1704 info.si_code = exit_code;
1705 info.si_pid = current->pid;
1706 info.si_uid = current->uid;
1708 /* Let the debugger run. */
1709 spin_lock_irq(¤t->sighand->siglock);
1710 ptrace_stop(exit_code, 0, &info);
1711 spin_unlock_irq(¤t->sighand->siglock);
/*
 * finish_stop() - final leg of entering TASK_STOPPED: notify the parent
 * (CLD_STOPPED) when appropriate, then stay stopped until continued.
 * stop_count < 0 or a ptraced task reports itself; the last thread of a
 * group stop (stop_count == 0) reports for the whole group.
 * NOTE(review): excerpt is elided — the to_self assignments and the
 * schedule() call are not visible here.
 */
1715 finish_stop(int stop_count)
1720 * If there are no other threads in the group, or if there is
1721 * a group stop in progress and we are the last to stop,
1722 * report to the parent. When ptraced, every thread reports itself.
1724 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1726 else if (stop_count == 0)
1731 read_lock(&tasklist_lock);
1732 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1733 read_unlock(&tasklist_lock);
1738 * Now we don't run again until continued.
1740 current->exit_code = 0;
1744 * This performs the stopping for SIGSTOP and other stop signals.
1745 * We have to stop all threads in the thread group.
1746 * Returns nonzero if we've actually stopped and released the siglock.
1747 * Returns zero if we didn't stop and still hold the siglock.
/*
 * Three paths: join an in-progress group stop, stop a single-threaded
 * group directly, or initiate a new group stop. The last path must drop
 * siglock to take tasklist_lock first (lock ordering), so
 * SIGNAL_STOP_DEQUEUED is rechecked afterwards to catch an intervening
 * SIGCONT posted during the unlocked window.
 */
1750 do_signal_stop(int signr)
1752 struct signal_struct *sig = current->signal;
1753 struct sighand_struct *sighand = current->sighand;
1754 int stop_count = -1;
1756 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1759 if (sig->group_stop_count > 0) {
1761 * There is a group stop in progress. We don't need to
1762 * start another one.
1764 signr = sig->group_exit_code;
1765 stop_count = --sig->group_stop_count;
1766 current->exit_code = signr;
1767 set_current_state(TASK_STOPPED);
/* Last participant marks the whole group stopped. */
1768 if (stop_count == 0)
1769 sig->flags = SIGNAL_STOP_STOPPED;
1770 spin_unlock_irq(&sighand->siglock);
1772 else if (thread_group_empty(current)) {
1774 * Lock must be held through transition to stopped state.
1776 current->exit_code = current->signal->group_exit_code = signr;
1777 set_current_state(TASK_STOPPED);
1778 sig->flags = SIGNAL_STOP_STOPPED;
1779 spin_unlock_irq(&sighand->siglock);
1783 * There is no group stop already in progress.
1784 * We must initiate one now, but that requires
1785 * dropping siglock to get both the tasklist lock
1786 * and siglock again in the proper order. Note that
1787 * this allows an intervening SIGCONT to be posted.
1788 * We need to check for that and bail out if necessary.
1790 struct task_struct *t;
1792 spin_unlock_irq(&sighand->siglock);
1794 /* signals can be posted during this window */
1796 read_lock(&tasklist_lock);
1797 spin_lock_irq(&sighand->siglock);
1799 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1801 * Another stop or continue happened while we
1802 * didn't have the lock. We can just swallow this
1803 * signal now. If we raced with a SIGCONT, that
1804 * should have just cleared it now. If we raced
1805 * with another processor delivering a stop signal,
1806 * then the SIGCONT that wakes us up should clear it.
1808 read_unlock(&tasklist_lock);
/* We initiate the stop: kick every other live, not-yet-stopped thread
 * so it participates. (Loop body partially elided in this excerpt.) */
1812 if (sig->group_stop_count == 0) {
1813 sig->group_exit_code = signr;
1815 for (t = next_thread(current); t != current;
1818 * Setting state to TASK_STOPPED for a group
1819 * stop is always done with the siglock held,
1820 * so this check has no races.
1822 if (!t->exit_state &&
1823 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1825 signal_wake_up(t, 0);
1827 sig->group_stop_count = stop_count;
1830 /* A race with another thread while unlocked. */
1831 signr = sig->group_exit_code;
1832 stop_count = --sig->group_stop_count;
1835 current->exit_code = signr;
1836 set_current_state(TASK_STOPPED);
1837 if (stop_count == 0)
1838 sig->flags = SIGNAL_STOP_STOPPED;
1840 spin_unlock_irq(&sighand->siglock);
1841 read_unlock(&tasklist_lock);
1844 finish_stop(stop_count);
1849 * Do appropriate magic when group_stop_count > 0.
1850 * We return nonzero if we stopped, after releasing the siglock.
1851 * We return zero if we still hold the siglock and should look
1852 * for another signal without checking group_stop_count again.
/* NOTE(review): "¤t" below is mojibake for "&current". */
1854 static int handle_group_stop(void)
1858 if (current->signal->group_exit_task == current) {
1860 * Group stop is so we can do a core dump,
1861 * We are the initiating thread, so get on with it.
1863 current->signal->group_exit_task = NULL;
1867 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1869 * Group stop is so another thread can do a core dump,
1870 * or else we are racing against a death signal.
1871 * Just punt the stop so we can get the next signal.
1876 * There is a group stop in progress. We stop
1877 * without any associated signal being in our queue.
1879 stop_count = --current->signal->group_stop_count;
/* Last participant marks the group stopped. */
1880 if (stop_count == 0)
1881 current->signal->flags = SIGNAL_STOP_STOPPED;
1882 current->exit_code = current->signal->group_exit_code;
1883 set_current_state(TASK_STOPPED);
1884 spin_unlock_irq(¤t->sighand->siglock);
1885 finish_stop(stop_count);
/*
 * get_signal_to_deliver() - main signal-delivery loop, run on the way
 * back to user mode. Dequeues the next unblocked signal, gives the
 * debugger a chance to intercept it, and performs default actions
 * (ignore, stop, coredump, group exit) in-kernel; returns nonzero only
 * for a signal whose user handler must be run.
 * NOTE(review): "¤t" below is mojibake for "&current"; loop braces,
 * several declarations and goto labels are elided from this excerpt.
 */
1889 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1890 struct pt_regs *regs, void *cookie)
1892 sigset_t *mask = ¤t->blocked;
1898 spin_lock_irq(¤t->sighand->siglock);
1900 struct k_sigaction *ka;
1902 if (unlikely(current->signal->group_stop_count > 0) &&
1903 handle_group_stop())
1906 signr = dequeue_signal(current, mask, info);
1909 break; /* will return 0 */
/* Debugger intercept: park in ptrace_stop() and let the tracer cancel,
 * pass through, or replace the signal via exit_code. */
1911 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1912 ptrace_signal_deliver(regs, cookie);
1914 /* Let the debugger run. */
1915 ptrace_stop(signr, signr, info);
1917 /* We're back. Did the debugger cancel the sig or group_exit? */
1918 signr = current->exit_code;
1919 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1922 current->exit_code = 0;
1924 /* Update the siginfo structure if the signal has
1925 changed. If the debugger wanted something
1926 specific in the siginfo structure then it should
1927 have updated *info via PTRACE_SETSIGINFO. */
1928 if (signr != info->si_signo) {
1929 info->si_signo = signr;
1931 info->si_code = SI_USER;
1932 info->si_pid = current->parent->pid;
1933 info->si_uid = current->parent->uid;
1936 /* If the (new) signal is now blocked, requeue it. */
1937 if (sigismember(¤t->blocked, signr)) {
1938 specific_send_sig_info(signr, info, current);
1943 ka = ¤t->sighand->action[signr-1];
1944 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1946 if (ka->sa.sa_handler != SIG_DFL) {
1947 /* Run the handler. */
/* SA_ONESHOT (SysV signal(2)) handlers reset to SIG_DFL once taken. */
1950 if (ka->sa.sa_flags & SA_ONESHOT)
1951 ka->sa.sa_handler = SIG_DFL;
1953 break; /* will return non-zero "signr" value */
1957 * Now we are doing the default action for this signal.
1959 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1962 /* Init gets no signals it doesn't want. */
1963 if (current == child_reaper)
1966 if (sig_kernel_stop(signr)) {
1968 * The default action is to stop all threads in
1969 * the thread group. The job control signals
1970 * do nothing in an orphaned pgrp, but SIGSTOP
1971 * always works. Note that siglock needs to be
1972 * dropped during the call to is_orphaned_pgrp()
1973 * because of lock ordering with tasklist_lock.
1974 * This allows an intervening SIGCONT to be posted.
1975 * We need to check for that and bail out if necessary.
1977 if (signr != SIGSTOP) {
1978 spin_unlock_irq(¤t->sighand->siglock);
1980 /* signals can be posted during this window */
1982 if (is_orphaned_pgrp(process_group(current)))
1985 spin_lock_irq(¤t->sighand->siglock);
1988 if (likely(do_signal_stop(signr))) {
1989 /* It released the siglock. */
1994 * We didn't actually stop, due to a race
1995 * with SIGCONT or something like that.
2000 spin_unlock_irq(¤t->sighand->siglock);
2003 * Anything else is fatal, maybe with a core dump.
2005 current->flags |= PF_SIGNALED;
2006 if (sig_kernel_coredump(signr)) {
2008 * If it was able to dump core, this kills all
2009 * other threads in the group and synchronizes with
2010 * their demise. If we lost the race with another
2011 * thread getting here, it set group_exit_code
2012 * first and our do_group_exit call below will use
2013 * that value and ignore the one we pass it.
2015 do_coredump((long)signr, signr, regs);
2019 * Death signals, no core dump.
2021 do_group_exit(signr);
2024 spin_unlock_irq(¤t->sighand->siglock);
/* Entry points exported to modules and the rest of the kernel. */
2028 EXPORT_SYMBOL(recalc_sigpending);
2029 EXPORT_SYMBOL_GPL(dequeue_signal);
2030 EXPORT_SYMBOL(flush_signals);
2031 EXPORT_SYMBOL(force_sig);
2032 EXPORT_SYMBOL(kill_pg);
2033 EXPORT_SYMBOL(kill_proc);
2034 EXPORT_SYMBOL(ptrace_notify);
2035 EXPORT_SYMBOL(send_sig);
2036 EXPORT_SYMBOL(send_sig_info);
2037 EXPORT_SYMBOL(sigprocmask);
2038 EXPORT_SYMBOL(block_all_signals);
2039 EXPORT_SYMBOL(unblock_all_signals);
2043 * System call entry points.
2046 asmlinkage long sys_restart_syscall(void)
2048 struct restart_block *restart = ¤t_thread_info()->restart_block;
2049 return restart->fn(restart);
/* do_no_restart_syscall() - restart_block stub for syscalls that must not
 * be transparently restarted. Body elided in this excerpt (returns
 * -EINTR in the full source — TODO confirm). */
2052 long do_no_restart_syscall(struct restart_block *param)
2058 * We don't need to get the kernel lock - this is all local to this
2059 * particular thread.. (and that's good, because this is _heavily_
2060 * used by various programs)
2064 * This is also useful for kernel threads that want to temporarily
2065 * (or permanently) block certain signals.
2067 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2068 * interface happily blocks "unblockable" signals like SIGKILL
/*
 * sigprocmask() - kernel-internal blocked-mask update. The SIG_BLOCK /
 * SIG_UNBLOCK / SIG_SETMASK case labels are elided from this excerpt.
 * NOTE(review): "¤t" below is mojibake for "&current".
 */
2071 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2075 spin_lock_irq(¤t->sighand->siglock);
2077 *oldset = current->blocked;
2082 sigorsets(¤t->blocked, ¤t->blocked, set);
2085 signandsets(¤t->blocked, ¤t->blocked, set);
2088 current->blocked = *set;
2093 recalc_sigpending();
2094 spin_unlock_irq(¤t->sighand->siglock);
/*
 * sys_rt_sigprocmask() - userspace sigprocmask with explicit sigsetsize;
 * SIGKILL/SIGSTOP are silently stripped from the new mask.
 * NOTE(review): "¤t" below is mojibake for "&current".
 */
2100 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2102 int error = -EINVAL;
2103 sigset_t old_set, new_set;
2105 /* XXX: Don't preclude handling different sized sigset_t's. */
2106 if (sigsetsize != sizeof(sigset_t))
2111 if (copy_from_user(&new_set, set, sizeof(*set)))
2113 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2115 error = sigprocmask(how, &new_set, &old_set);
/* set == NULL path: just read the current mask under siglock. */
2121 spin_lock_irq(¤t->sighand->siglock);
2122 old_set = current->blocked;
2123 spin_unlock_irq(¤t->sighand->siglock);
2127 if (copy_to_user(oset, &old_set, sizeof(*oset)))
/*
 * do_sigpending() - report signals that are both pending (private or
 * shared queue) and currently blocked by this thread.
 * NOTE(review): "¤t" below is mojibake for "&current".
 */
2135 long do_sigpending(void __user *set, unsigned long sigsetsize)
2137 long error = -EINVAL;
2140 if (sigsetsize > sizeof(sigset_t))
2143 spin_lock_irq(¤t->sighand->siglock);
2144 sigorsets(&pending, ¤t->pending.signal,
2145 ¤t->signal->shared_pending.signal);
2146 spin_unlock_irq(¤t->sighand->siglock);
2148 /* Outside the lock because only this thread touches it. */
2149 sigandsets(&pending, ¤t->blocked, &pending);
2152 if (!copy_to_user(set, &pending, sigsetsize))
/* sys_rt_sigpending() - thin syscall wrapper around do_sigpending(). */
2160 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2162 return do_sigpending(set, sigsetsize);
2165 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
/*
 * copy_siginfo_to_user() - copy a siginfo_t to userspace field by field
 * so kernel structure padding is never leaked. Userspace-originated
 * info (si_code < 0) is copied verbatim instead.
 * NOTE(review): the switch case labels (__SI_KILL, __SI_TIMER, __SI_POLL,
 * __SI_FAULT, __SI_CHLD) are elided from this excerpt.
 */
2167 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2171 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2173 if (from->si_code < 0)
2174 return __copy_to_user(to, from, sizeof(siginfo_t))
2177 * If you change siginfo_t structure, please be sure
2178 * this code is fixed accordingly.
2179 * It should never copy any pad contained in the structure
2180 * to avoid security leaks, but must copy the generic
2181 * 3 ints plus the relevant union member.
2183 err = __put_user(from->si_signo, &to->si_signo);
2184 err |= __put_user(from->si_errno, &to->si_errno);
2185 err |= __put_user((short)from->si_code, &to->si_code);
/* Which union member is live is determined by the si_code class bits. */
2186 switch (from->si_code & __SI_MASK) {
2188 err |= __put_user(from->si_pid, &to->si_pid);
2189 err |= __put_user(from->si_uid, &to->si_uid);
2192 err |= __put_user(from->si_tid, &to->si_tid);
2193 err |= __put_user(from->si_overrun, &to->si_overrun);
2194 err |= __put_user(from->si_ptr, &to->si_ptr);
2197 err |= __put_user(from->si_band, &to->si_band);
2198 err |= __put_user(from->si_fd, &to->si_fd);
2201 err |= __put_user(from->si_addr, &to->si_addr);
2202 #ifdef __ARCH_SI_TRAPNO
2203 err |= __put_user(from->si_trapno, &to->si_trapno);
2207 err |= __put_user(from->si_pid, &to->si_pid);
2208 err |= __put_user(from->si_uid, &to->si_uid);
2209 err |= __put_user(from->si_status, &to->si_status);
2210 err |= __put_user(from->si_utime, &to->si_utime);
2211 err |= __put_user(from->si_stime, &to->si_stime);
2213 case __SI_RT: /* This is not generated by the kernel as of now. */
2214 case __SI_MESGQ: /* But this is */
2215 err |= __put_user(from->si_pid, &to->si_pid);
2216 err |= __put_user(from->si_uid, &to->si_uid);
2217 err |= __put_user(from->si_ptr, &to->si_ptr);
2219 default: /* this is just in case for now ... */
2220 err |= __put_user(from->si_pid, &to->si_pid);
2221 err |= __put_user(from->si_uid, &to->si_uid);
/*
 * sys_rt_sigtimedwait() - wait for a signal in "uthese" with an optional
 * timeout; the wanted signals are temporarily unblocked (the previous
 * mask saved in real_blocked) while sleeping, then restored.
 * NOTE(review): "¤t" below is mojibake for "&current"; several
 * declarations and error paths are elided from this excerpt.
 */
2230 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2231 siginfo_t __user *uinfo,
2232 const struct timespec __user *uts,
2241 /* XXX: Don't preclude handling different sized sigset_t's. */
2242 if (sigsetsize != sizeof(sigset_t))
2245 if (copy_from_user(&these, uthese, sizeof(these)))
2249 * Invert the set of allowed signals to get those we
2252 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2256 if (copy_from_user(&ts, uts, sizeof(ts)))
/* Reject timespecs outside [0, 1e9) nanoseconds. */
2258 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2263 spin_lock_irq(¤t->sighand->siglock);
2264 sig = dequeue_signal(current, &these, &info);
2266 timeout = MAX_SCHEDULE_TIMEOUT;
/* Round a nonzero user timeout up by one jiffy to avoid early return. */
2268 timeout = (timespec_to_jiffies(&ts)
2269 + (ts.tv_sec || ts.tv_nsec));
2272 /* None ready -- temporarily unblock those we're
2273 * interested while we are sleeping in so that we'll
2274 * be awakened when they arrive. */
2275 current->real_blocked = current->blocked;
2276 sigandsets(¤t->blocked, ¤t->blocked, &these);
2277 recalc_sigpending();
2278 spin_unlock_irq(¤t->sighand->siglock);
2280 timeout = schedule_timeout_interruptible(timeout);
2282 spin_lock_irq(¤t->sighand->siglock);
2283 sig = dequeue_signal(current, &these, &info);
2284 current->blocked = current->real_blocked;
2285 siginitset(¤t->real_blocked, 0);
2286 recalc_sigpending();
2289 spin_unlock_irq(¤t->sighand->siglock);
2294 if (copy_siginfo_to_user(uinfo, &info))
/*
 * sys_kill() - kill(2): build an SI_USER siginfo identifying the sender
 * and hand off to kill_something_info() for pid dispatch.
 */
2307 sys_kill(int pid, int sig)
2309 struct siginfo info;
2311 info.si_signo = sig;
2313 info.si_code = SI_USER;
2314 info.si_pid = current->tgid;
2315 info.si_uid = current->uid;
2317 return kill_something_info(sig, &info, pid);
/*
 * do_tkill() - common worker for tkill(2)/tgkill(2): signal exactly one
 * thread, verifying its thread-group id when tgid > 0. sig == 0 is a
 * pure permission/existence probe; nothing is delivered.
 */
2320 static int do_tkill(int tgid, int pid, int sig)
2323 struct siginfo info;
2324 struct task_struct *p;
2327 info.si_signo = sig;
2329 info.si_code = SI_TKILL;
2330 info.si_pid = current->tgid;
2331 info.si_uid = current->uid;
2333 read_lock(&tasklist_lock);
2334 p = find_task_by_pid(pid);
/* tgid <= 0 (plain tkill) skips the thread-group check. */
2335 if (p && (tgid <= 0 || p->tgid == tgid)) {
2336 error = check_kill_permission(sig, &info, p);
2338 * The null signal is a permissions and process existence
2339 * probe. No signal is actually delivered.
2341 if (!error && sig && p->sighand) {
2342 spin_lock_irq(&p->sighand->siglock);
2343 handle_stop_signal(sig, p);
2344 error = specific_send_sig_info(sig, &info, p);
2345 spin_unlock_irq(&p->sighand->siglock);
2348 read_unlock(&tasklist_lock);
2354 * sys_tgkill - send signal to one specific thread
2355 * @tgid: the thread group ID of the thread
2356 * @pid: the PID of the thread
2357 * @sig: signal to be sent
2359 * This syscall also checks the tgid and returns -ESRCH even if the PID
2360 * exists but it's not belonging to the target process anymore. This
2361 * method solves the problem of threads exiting and PIDs getting reused.
2363 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2365 /* This is only valid for single tasks */
/* Non-positive pid/tgid forms belong to kill(2)-style group semantics. */
2366 if (pid <= 0 || tgid <= 0)
2369 return do_tkill(tgid, pid, sig);
2373 * Send a signal to only one task, even if it's a CLONE_THREAD task.
/* sys_tkill() - obsolete precursor of tgkill(): no tgid check (tgid 0). */
2376 sys_tkill(int pid, int sig)
2378 /* This is only valid for single tasks */
2382 return do_tkill(0, pid, sig);
/*
 * sys_rt_sigqueueinfo() - sigqueue(2) backend: deliver caller-supplied
 * siginfo, rejecting si_code >= 0 so userspace cannot forge kernel- or
 * kill(2)-originated info.
 */
2386 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2390 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2393 /* Not even root can pretend to send signals from the kernel.
2394 Nor can they impersonate a kill(), which adds source info. */
2395 if (info.si_code >= 0)
2397 info.si_signo = sig;
2399 /* POSIX.1b doesn't mention process groups. */
2400 return kill_proc_info(sig, &info, pid);
/*
 * do_sigaction() - sigaction(2) core: read and/or replace the disposition
 * for sig. Per POSIX, installing SIG_IGN (or SIG_DFL for a
 * default-ignore signal) discards matching pending signals from every
 * thread in the group.
 * NOTE(review): "¤t" below is mojibake for "&current"; several brace,
 * assignment and loop lines are elided from this excerpt.
 */
2404 do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2406 struct k_sigaction *k;
2409 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2412 k = ¤t->sighand->action[sig-1];
2414 spin_lock_irq(¤t->sighand->siglock);
2415 if (signal_pending(current)) {
2417 * If there might be a fatal signal pending on multiple
2418 * threads, make sure we take it before changing the action.
2420 spin_unlock_irq(¤t->sighand->siglock);
2421 return -ERESTARTNOINTR;
/* SIGKILL/SIGSTOP can never be masked by a handler's sa_mask. */
2428 sigdelsetmask(&act->sa.sa_mask,
2429 sigmask(SIGKILL) | sigmask(SIGSTOP));
2432 * "Setting a signal action to SIG_IGN for a signal that is
2433 * pending shall cause the pending signal to be discarded,
2434 * whether or not it is blocked."
2436 * "Setting a signal action to SIG_DFL for a signal that is
2437 * pending and whose default action is to ignore the signal
2438 * (for example, SIGCHLD), shall cause the pending signal to
2439 * be discarded, whether or not it is blocked"
2441 if (act->sa.sa_handler == SIG_IGN ||
2442 (act->sa.sa_handler == SIG_DFL &&
2443 sig_kernel_ignore(sig))) {
2445 * This is a fairly rare case, so we only take the
2446 * tasklist_lock once we're sure we'll need it.
2447 * Now we must do this little unlock and relock
2448 * dance to maintain the lock hierarchy.
2450 struct task_struct *t = current;
2451 spin_unlock_irq(&t->sighand->siglock);
2452 read_lock(&tasklist_lock);
2453 spin_lock_irq(&t->sighand->siglock);
/* Purge the now-ignored signal from the shared queue and from each
 * thread's private queue (thread-walk loop partially elided). */
2456 sigaddset(&mask, sig);
2457 rm_from_queue_full(&mask, &t->signal->shared_pending);
2459 rm_from_queue_full(&mask, &t->pending);
2460 recalc_sigpending_tsk(t);
2462 } while (t != current);
2463 spin_unlock_irq(¤t->sighand->siglock);
2464 read_unlock(&tasklist_lock);
2471 spin_unlock_irq(¤t->sighand->siglock);
/*
 * do_sigaltstack() - sigaltstack(2) core: report the old alternate
 * signal stack and/or install a new one. sp is the current user stack
 * pointer, used both to compute SS_ONSTACK for the old stack and to
 * refuse replacing a stack we are currently executing on.
 * NOTE(review): declarations, error assignments and braces are elided
 * from this excerpt.
 */
2476 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2482 oss.ss_sp = (void __user *) current->sas_ss_sp;
2483 oss.ss_size = current->sas_ss_size;
2484 oss.ss_flags = sas_ss_flags(sp);
2493 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2494 || __get_user(ss_sp, &uss->ss_sp)
2495 || __get_user(ss_flags, &uss->ss_flags)
2496 || __get_user(ss_size, &uss->ss_size))
/* Cannot change the alternate stack while running on it. */
2500 if (on_sig_stack(sp))
2506 * Note - this code used to test ss_flags incorrectly
2507 * old code may have been written using ss_flags==0
2508 * to mean ss_flags==SS_ONSTACK (as this was the only
2509 * way that worked) - this fix preserves that older
2512 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2515 if (ss_flags == SS_DISABLE) {
2520 if (ss_size < MINSIGSTKSZ)
2524 current->sas_ss_sp = (unsigned long) ss_sp;
2525 current->sas_ss_size = ss_size;
2530 if (copy_to_user(uoss, &oss, sizeof(oss)))
2539 #ifdef __ARCH_WANT_SYS_SIGPENDING
/* Legacy sigpending(2): old_sigset_t-sized view of do_sigpending(). */
2542 sys_sigpending(old_sigset_t __user *set)
2544 return do_sigpending(set, sizeof(*set));
2549 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2550 /* Some platforms have their own version with special arguments others
2551 support only sys_rt_sigprocmask. */
/*
 * sys_sigprocmask() - legacy single-word sigprocmask(2); only touches
 * blocked.sig[0]. The SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK case labels are
 * elided here. NOTE(review): "¤t" is mojibake for "&current".
 */
2554 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2557 old_sigset_t old_set, new_set;
2561 if (copy_from_user(&new_set, set, sizeof(*set)))
2563 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2565 spin_lock_irq(¤t->sighand->siglock);
2566 old_set = current->blocked.sig[0];
2574 sigaddsetmask(¤t->blocked, new_set);
2577 sigdelsetmask(¤t->blocked, new_set);
2580 current->blocked.sig[0] = new_set;
2584 recalc_sigpending();
2585 spin_unlock_irq(¤t->sighand->siglock);
/* set == NULL path: report the old mask without modification. */
2591 old_set = current->blocked.sig[0];
2594 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2601 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2603 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
/*
 * sys_rt_sigaction() - sigaction(2) syscall wrapper: copy the user
 * struct sigaction in and out around do_sigaction(). act/oact may each
 * independently be NULL.
 */
2605 sys_rt_sigaction(int sig,
2606 const struct sigaction __user *act,
2607 struct sigaction __user *oact,
2610 struct k_sigaction new_sa, old_sa;
2613 /* XXX: Don't preclude handling different sized sigset_t's. */
2614 if (sigsetsize != sizeof(sigset_t))
2618 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2622 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2625 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2631 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2633 #ifdef __ARCH_WANT_SYS_SGETMASK
2636 * For backwards compatibility. Functionality superseded by sigprocmask.
/* sys_sgetmask() body (header elided): report blocked set's low word. */
2642 return current->blocked.sig[0];
/*
 * sys_ssetmask() - replace the entire blocked set with newmask (minus
 * SIGKILL/SIGSTOP), returning the previous low word.
 * NOTE(review): "¤t" below is mojibake for "&current".
 */
2646 sys_ssetmask(int newmask)
2650 spin_lock_irq(¤t->sighand->siglock);
2651 old = current->blocked.sig[0];
2653 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)|
2655 recalc_sigpending();
2656 spin_unlock_irq(¤t->sighand->siglock);
2660 #endif /* __ARCH_WANT_SGETMASK */
2662 #ifdef __ARCH_WANT_SYS_SIGNAL
2664 * For backwards compatibility. Functionality superseded by sigaction.
/*
 * sys_signal() - ancient signal(2): install a one-shot, non-masking
 * handler via do_sigaction(). Returns the previous handler value, or a
 * negative errno on failure.
 */
2666 asmlinkage unsigned long
2667 sys_signal(int sig, __sighandler_t handler)
2669 struct k_sigaction new_sa, old_sa;
2672 new_sa.sa.sa_handler = handler;
/* Historic SysV semantics: handler resets to SIG_DFL on delivery and the
 * signal is not blocked while the handler runs. */
2673 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2674 sigemptyset(&new_sa.sa.sa_mask);
2676 ret = do_sigaction(sig, &new_sa, &old_sa);
2678 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2680 #endif /* __ARCH_WANT_SYS_SIGNAL */
2682 #ifdef __ARCH_WANT_SYS_PAUSE
/* pause(2) body (function header and schedule() call elided): sleep
 * interruptibly until a signal arrives; -ERESTARTNOHAND makes the
 * signal-delivery path convert the result to -EINTR. */
2687 current->state = TASK_INTERRUPTIBLE;
2689 return -ERESTARTNOHAND;
2694 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
/*
 * sys_rt_sigsuspend() - atomically swap in a temporary blocked mask
 * (SIGKILL/SIGSTOP stripped) and sleep until a signal is delivered;
 * TIF_RESTORE_SIGMASK tells the signal-return path to restore
 * saved_sigmask afterwards.
 * NOTE(review): "¤t" below is mojibake for "&current"; the schedule()
 * loop is elided from this excerpt.
 */
2695 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2699 /* XXX: Don't preclude handling different sized sigset_t's. */
2700 if (sigsetsize != sizeof(sigset_t))
2703 if (copy_from_user(&newset, unewset, sizeof(newset)))
2705 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2707 spin_lock_irq(¤t->sighand->siglock);
2708 current->saved_sigmask = current->blocked;
2709 current->blocked = newset;
2710 recalc_sigpending();
2711 spin_unlock_irq(¤t->sighand->siglock);
2713 current->state = TASK_INTERRUPTIBLE;
2715 set_thread_flag(TIF_RESTORE_SIGMASK);
2716 return -ERESTARTNOHAND;
2718 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2720 void __init signals_init(void)
2723 kmem_cache_create("sigqueue",
2724 sizeof(struct sigqueue),
2725 __alignof__(struct sigqueue),
2726 SLAB_PANIC, NULL, NULL);