/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/mempolicy.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>
#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
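
/*
 * Illustrative userspace sketch (not part of this file): on an
 * architecture that does not provide one of these hooks, the matching
 * prctl() simply fails with the -EINVAL fallback above, which a caller
 * can probe for:
 *
 *	#include <sys/prctl.h>
 *	#include <errno.h>
 *
 *	int tsc;
 *	if (prctl(PR_GET_TSC, &tsc, 0, 0, 0) == -1 && errno == EINVAL)
 *		;	// no TSC control on this arch; fall back gracefully
 */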
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
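
/*
 * Illustrative sketch (not from this file): a filesystem with 16-bit
 * on-disk uids substitutes fs_overflowuid when the real uid does not
 * fit, roughly:
 *
 *	u16 disk_uid = (uid <= 0xFFFF) ? uid : fs_overflowuid;
 *
 * (The helpers that actually do this live in <linux/highuid.h>.)
 */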
/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);
/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}
/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid)) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
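
/*
 * Illustrative userspace sketch (not part of this file): because of the
 * offset described above, the raw syscall must be decoded by hand:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (ret >= 1 && ret <= 40)
 *		nice = 20 - (int)ret;	// maps 40..1 back to -20..19
 *
 * (The glibc getpriority() wrapper performs this conversion itself, so
 * portable code should just use the wrapper.)
 */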
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);
/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);
/* Add backwards compatibility for stable trees. */
#ifndef PF_NO_SETAFFINITY
#define PF_NO_SETAFFINITY	PF_THREAD_BOUND
#endif
static void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = 0;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}
/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	kmsg_dump(KMSG_DUMP_RESTART);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	printk(KERN_EMERG "System halted.\n");
	kmsg_dump(KMSG_DUMP_HALT);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);
/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	printk(KERN_EMERG "Power down.\n");
	kmsg_dump(KMSG_DUMP_POWEROFF);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
static DEFINE_MUTEX(reboot_mutex);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&reboot_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&reboot_mutex);
	return ret;
}
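
/*
 * Illustrative userspace sketch (not part of this file): the magic
 * numbers above make an accidental reboot syscall essentially
 * impossible. A minimal privileged caller looks like:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();				// reboot doesn't sync
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */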
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are not races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    nsown_capable(CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    nsown_capable(CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
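
/*
 * Illustrative userspace sketch (not part of this file): the BSD-style
 * semantics above let a setgid program drop its extra group for good,
 * because setting the real gid also resets the saved gid:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) != 0)
 *		abort();	// refuse to keep running half-privileged
 */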
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
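
/*
 * Illustrative userspace sketch (not part of this file): the 4.4BSD
 * seteuid() mentioned above is just a degenerate setresuid() call that
 * leaves the real and saved uids untouched:
 *
 *	int seteuid_compat(uid_t euid)
 *	{
 *		return setresuid((uid_t) -1, euid, (uid_t) -1);
 *	}
 */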
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	if (!(retval = put_user(ruid, ruidp)) &&
	    !(retval = put_user(euid, euidp)))
		retval = put_user(suid, suidp);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	if (!(retval = put_user(rgid, rgidp)) &&
	    !(retval = put_user(egid, egidp)))
		retval = put_user(sgid, sgidp);

	return retval;
}
998 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
999 * is used for "access()" and for the NFS daemon (letting nfsd stay at
1000 * whatever uid it wants to). It normally shadows "euid", except when
1001 * explicitly set by setfsuid() or for access..
1003 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
1005 const struct cred *old;
1010 old = current_cred();
1011 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
1013 kuid = make_kuid(old->user_ns, uid);
1014 if (!uid_valid(kuid))
1017 new = prepare_creds();
1021 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
1022 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
1023 nsown_capable(CAP_SETUID)) {
1024 if (!uid_eq(kuid, old->fsuid)) {
1026 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
/*
 * Samma på svenska.. ("the same in Swedish" - setfsgid mirrors setfsuid)
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    nsown_capable(CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
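
/*
 * Illustrative userspace sketch (not part of this file): a program that
 * cannot parse "3.x" can be run under the UNAME26 personality, at which
 * point uname() reports the mapped 2.6.40+x string:
 *
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	struct utsname u;
 *	personality(PER_LINUX | UNAME26);
 *	uname(&u);	// e.g. a 3.4 kernel is reported as "2.6.44"
 */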
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
					resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
			new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
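
/*
 * Illustrative userspace sketch (not part of this file): prlimit64 can
 * read or change another process's limits in one call. Reading pid
 * 1234's RLIMIT_NOFILE, assuming suitable credentials (glibc exposes
 * this syscall as prlimit()):
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rlimit old;
 *	if (prlimit(1234, RLIMIT_NOFILE, NULL, &old) == 0)
 *		printf("soft=%llu hard=%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 */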
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);
		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}
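
/*
 * Illustrative userspace sketch (not part of this file): ru_maxrss is
 * already in kilobytes thanks to the pages-to-KB conversion above:
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 */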
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	k_getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) ||
	    exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	down_write(&mm->mmap_sem);

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	err = -EBUSY;
	if (mm->exe_file) {
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_file &&
			    path_equal(&vma->vm_file->f_path,
				       &mm->exe_file->f_path))
				goto exit_unlock;
	}

	/*
	 * The symlink can be changed only once, just to disallow arbitrary
	 * transitions malicious software might bring in. This means one
	 * could make a snapshot over all processes running and monitor
	 * /proc/pid/exe changes to notice unusual activity if needed.
	 */
	err = -EPERM;
	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
		goto exit_unlock;

	err = 0;
	set_mm_exe_file(mm, exe.file);	/* this grabs a reference to exe.file */
exit_unlock:
	up_write(&mm->mmap_sem);

exit:
	fdput(exe);
	return err;
}
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	switch (opt) {
	case PR_SET_MM_START_CODE:
		mm->start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		mm->end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		mm->start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		mm->end_data = addr;
		break;

	case PR_SET_MM_START_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment variables.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	/*
	 * This doesn't move auxiliary vector itself
	 * since it's pinned to mm_struct, but allow
	 * to fill vector with new values. It's up
	 * to a caller to provide sane values here
	 * otherwise user space tools which use this
	 * vector might be unhappy.
	 */
	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		/* Make sure the last entry is always AT_NULL */
		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif
static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
		struct vm_area_struct **prev,
		unsigned long start, unsigned long end,
		const char __user *name_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;

	if (name_addr == vma_get_anon_name(vma)) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma),
				name_addr);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	if (!vma->vm_file)
		vma->shared.anon_name = name_addr;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
			unsigned long arg)
{
	unsigned long tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this matches the handling in madvise.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			return error;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				return error;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = prctl_update_vma_anon_name(vma, &prev, start, end,
				(const char __user *)arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			return error;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
}
static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	int error;
	unsigned long len;
	unsigned long end;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	down_write(&mm->mmap_sem);

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		error = prctl_set_vma_anon_name(start, end, arg);
		break;
	default:
		error = -EINVAL;
	}

	up_write(&mm->mmap_sem);

	return error;
}
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		current->no_new_privs = 1;
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return current->no_new_privs ? 1 : 0;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
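
/*
 * Illustrative userspace sketch (not part of this file): naming a
 * thread through the PR_SET_NAME case above. comm is at most 16 bytes
 * including the terminating NUL, so longer names are truncated:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_NAME, "worker-1", 0, 0, 0);
 */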
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
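
/*
 * Illustrative userspace sketch (not part of this file): either pointer
 * may be NULL, so asking for only the CPU is fine (glibc also exposes
 * this as sched_getcpu()):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *
 *	unsigned cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("running on cpu %u, node %u\n", cpu, node);
 */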
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static int __orderly_poweroff(bool force)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
	if (argv) {
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
					 __func__, poweroff_cmd);
		ret = -ENOMEM;
	}

	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
					"forcing the issue\n");
		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap.  Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
static bool poweroff_force;

static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
	return 0;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};
COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 * down if needed
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */