Merge branch 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Oct 2008 20:19:56 +0000 (13:19 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Oct 2008 20:19:56 +0000 (13:19 -0700)
* 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (36 commits)
  fix documentation of sysrq-q really
  Fix documentation of sysrq-q
  timer_list: add base address to clock base
  timer_list: print cpu number of clockevents device
  timer_list: print real timer address
  NOHZ: restart tick device from irq_enter()
  NOHZ: split tick_nohz_restart_sched_tick()
  NOHZ: unify the nohz function calls in irq_enter()
  timers: fix itimer/many thread hang, fix
  timers: fix itimer/many thread hang, v3
  ntp: improve adjtimex frequency rounding
  timekeeping: fix rounding problem during clock update
  ntp: let update_persistent_clock() sleep
  hrtimer: reorder struct hrtimer to save 8 bytes on 64bit builds
  posix-timers: lock_timer: make it readable
  posix-timers: lock_timer: kill the bogus ->it_id check
  posix-timers: kill ->it_sigev_signo and ->it_sigev_value
  posix-timers: sys_timer_create: cleanup the error handling
  posix-timers: move the initialization of timer->sigq from send to create path
  posix-timers: sys_timer_create: simplify and s/tasklist/rcu/
  ...

Fix trivial conflicts due to sysrq-q description clashes in
Documentation/sysrq.txt and drivers/char/sysrq.c

37 files changed:
Documentation/sysrq.txt
drivers/char/sysrq.c
drivers/clocksource/acpi_pm.c
fs/binfmt_elf.c
fs/proc/array.c
include/linux/clocksource.h
include/linux/hrtimer.h
include/linux/kernel_stat.h
include/linux/posix-timers.h
include/linux/sched.h
include/linux/tick.h
include/linux/time.h
include/linux/timex.h
kernel/compat.c
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/itimer.c
kernel/posix-cpu-timers.c
kernel/posix-timers.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c
kernel/sched_stats.h
kernel/signal.c
kernel/softirq.c
kernel/sys.c
kernel/time/clocksource.c
kernel/time/jiffies.c
kernel/time/ntp.c
kernel/time/tick-broadcast.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer_list.c
kernel/timer.c
security/selinux/hooks.c

index 49378a9f2b5f276c4a050e76b0898c25a74303e0..10a0263ebb3f01e832c7827cc75d7fe54b341a6f 100644 (file)
@@ -95,8 +95,9 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 
 'p'     - Will dump the current registers and flags to your console.
 
-'q'     - Will dump a list of all running hrtimers.
-         WARNING: Does not cover any other timers
+'q'     - Will dump per CPU lists of all armed hrtimers (but NOT regular
+          timer_list timers) and detailed information about all
+          clockevent devices.
 
 'r'     - Turns off keyboard raw mode and sets it to XLATE.
 
index d0c0d64ed366ce9e961c7dbbfca03a9764e29321..ce0d9da52a8ab808a24e8d30bff27d631b474713 100644 (file)
@@ -168,7 +168,7 @@ static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
 static struct sysrq_key_op sysrq_show_timers_op = {
        .handler        = sysrq_handle_show_timers,
        .help_msg       = "show-all-timers(Q)",
-       .action_msg     = "Show pending hrtimers (no others)",
+       .action_msg     = "Show clockevent devices & pending hrtimers (no others)",
 };
 
 static void sysrq_handle_mountro(int key, struct tty_struct *tty)
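Aside, not part of the patch: per the sysrq.txt context above, any sysrq key — including the updated 'q' dump — can be exercised from userspace by writing the character to /proc/sysrq-trigger. A minimal C snippet, assuming root and a kernel built with CONFIG_MAGIC_SYSRQ:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Write the sysrq key 'q' to the trigger file named in the doc text. */
	int fd = open("/proc/sysrq-trigger", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sysrq-trigger");
		return 1;
	}
	if (write(fd, "q", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}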
index 71d2ac4e3f46cc0e33410a2d8f7889ac1ae8de09..c20171078d1d6f475f04a23fc0be9e63101d5d76 100644 (file)
@@ -237,9 +237,12 @@ static int __init parse_pmtmr(char *arg)
 
        if (strict_strtoul(arg, 16, &base))
                return -EINVAL;
-
+#ifdef CONFIG_X86_64
+       if (base > UINT_MAX)
+               return -ERANGE;
+#endif
        printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
-              (unsigned int)pmtmr_ioport, base);
+              pmtmr_ioport, base);
        pmtmr_ioport = base;
 
        return 1;
index e2159063198a072ef75545130ced4eb21e56accf..8fcfa398d35075e1e149b70d62226a6095524ad3 100644 (file)
@@ -1341,20 +1341,15 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
+               struct task_cputime cputime;
+
                /*
-                * This is the record for the group leader.  Add in the
-                * cumulative times of previous dead threads.  This total
-                * won't include the time of each live thread whose state
-                * is included in the core dump.  The final total reported
-                * to our parent process when it calls wait4 will include
-                * those sums as well as the little bit more time it takes
-                * this and each other thread to finish dying after the
-                * core dump synchronization phase.
+                * This is the record for the group leader.  It shows the
+                * group-wide total, not its individual thread total.
                 */
-               cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
-                                  &prstatus->pr_utime);
-               cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
-                                  &prstatus->pr_stime);
+               thread_group_cputime(p, &cputime);
+               cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
+               cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
index f4bc0e789539f413e080324ab8209575ba349c42..bb9f4b05703de9b587a1b2cbd36ab651d2613da8 100644 (file)
@@ -388,20 +388,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                /* add up live thread stats at the group level */
                if (whole) {
+                       struct task_cputime cputime;
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
-                               utime = cputime_add(utime, task_utime(t));
-                               stime = cputime_add(stime, task_stime(t));
                                gtime = cputime_add(gtime, task_gtime(t));
                                t = next_thread(t);
                        } while (t != task);
 
                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
-                       utime = cputime_add(utime, sig->utime);
-                       stime = cputime_add(stime, sig->stime);
+                       thread_group_cputime(task, &cputime);
+                       utime = cputime.utime;
+                       stime = cputime.stime;
                        gtime = cputime_add(gtime, sig->gtime);
                }
 
index 55e434feec993d9656ccf6bcd31964005daa9667..f88d32f8ff7c8a0d242763328c8e14f39445006d 100644 (file)
@@ -45,7 +45,8 @@ struct clocksource;
  * @read:              returns a cycle value
  * @mask:              bitmask for two's complement
  *                     subtraction of non 64 bit counters
- * @mult:              cycle to nanosecond multiplier
+ * @mult:              cycle to nanosecond multiplier (adjusted by NTP)
+ * @mult_orig:         cycle to nanosecond multiplier (unadjusted by NTP)
  * @shift:             cycle to nanosecond divisor (power of two)
  * @flags:             flags describing special properties
  * @vread:             vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
        cycle_t (*read)(void);
        cycle_t mask;
        u32 mult;
+       u32 mult_orig;
        u32 shift;
        unsigned long flags;
        cycle_t (*vread)(void);
@@ -77,6 +79,7 @@ struct clocksource {
        /* timekeeping specific data, ignore */
        cycle_t cycle_interval;
        u64     xtime_interval;
+       u32     raw_interval;
        /*
         * Second part is written at each timer interrupt
         * Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@ struct clocksource {
        cycle_t cycle_last ____cacheline_aligned_in_smp;
        u64 xtime_nsec;
        s64 error;
+       struct timespec raw_time;
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
 {
        u64 tmp;
 
-       /* XXX - All of this could use a whole lot of optimization */
+       /* Do the ns -> cycle conversion first, using original mult */
        tmp = length_nsec;
        tmp <<= c->shift;
-       tmp += c->mult/2;
-       do_div(tmp, c->mult);
+       tmp += c->mult_orig/2;
+       do_div(tmp, c->mult_orig);
 
        c->cycle_interval = (cycle_t)tmp;
        if (c->cycle_interval == 0)
                c->cycle_interval = 1;
 
+       /* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
        c->xtime_interval = (u64)c->cycle_interval * c->mult;
+       c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
 }
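Aside, not part of the patch: the interval math above can be checked standalone. The mult/shift numbers below are invented for illustration; real values come from each clocksource's registration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 22;
	uint32_t mult_orig = 1 << 22;     /* pretend 1 cycle == 1 ns exactly */
	uint32_t mult = mult_orig + 50;   /* pretend NTP sped the clock up slightly */
	uint64_t length_nsec = 10000000;  /* a 10 ms accumulation interval */

	/* ns -> cycles, rounded, using the unadjusted multiplier */
	uint64_t cycles = ((length_nsec << shift) + mult_orig / 2) / mult_orig;
	if (cycles == 0)
		cycles = 1;

	/* cycles -> shifted ns with the NTP-adjusted mult (xtime accumulation),
	   and plain ns with the original mult (the new raw monotonic clock) */
	uint64_t xtime_interval = cycles * mult;
	uint64_t raw_interval = (cycles * (uint64_t)mult_orig) >> shift;

	printf("cycle_interval=%llu xtime_interval=%llu raw_interval=%llu\n",
	       (unsigned long long)cycles,
	       (unsigned long long)xtime_interval,
	       (unsigned long long)raw_interval);
	return 0;
}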
 
 
index 2f245fe63bda5611ad909c1452aa8a79c4f29eb4..9a4e35cd5f79d80e52bf3a1509e0438bb6a77ec4 100644 (file)
@@ -125,12 +125,12 @@ struct hrtimer {
        enum hrtimer_restart            (*function)(struct hrtimer *);
        struct hrtimer_clock_base       *base;
        unsigned long                   state;
-       enum hrtimer_cb_mode            cb_mode;
        struct list_head                cb_entry;
+       enum hrtimer_cb_mode            cb_mode;
 #ifdef CONFIG_TIMER_STATS
+       int                             start_pid;
        void                            *start_site;
        char                            start_comm[16];
-       int                             start_pid;
 #endif
 };
 
@@ -155,10 +155,8 @@ struct hrtimer_sleeper {
  * @first:             pointer to the timer node which expires first
  * @resolution:                the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
- * @get_softirq_time:  function to retrieve the current time from the softirq
  * @softirq_time:      the time when running the hrtimer queue in the softirq
  * @offset:            offset of this clock to the monotonic base
- * @reprogram:         function to reprogram the timer event
  */
 struct hrtimer_clock_base {
        struct hrtimer_cpu_base *cpu_base;
@@ -167,13 +165,9 @@ struct hrtimer_clock_base {
        struct rb_node          *first;
        ktime_t                 resolution;
        ktime_t                 (*get_time)(void);
-       ktime_t                 (*get_softirq_time)(void);
        ktime_t                 softirq_time;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                 offset;
-       int                     (*reprogram)(struct hrtimer *t,
-                                            struct hrtimer_clock_base *b,
-                                            ktime_t n);
 #endif
 };
 
index cf9f40a91c9c79837753956b135e49c9ec984630..cac3750cd65e6c1f07b2dc1c4cc8b9f654282726 100644 (file)
@@ -52,6 +52,7 @@ static inline int kstat_irqs(int irq)
        return sum;
 }
 
+extern unsigned long long task_delta_exec(struct task_struct *);
 extern void account_user_time(struct task_struct *, cputime_t);
 extern void account_user_time_scaled(struct task_struct *, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t);
index a7dd38f30ade61d1cf6fba16d7f7f60b3e8cc944..a7c7213555492520e649a4920d9fc671e88460cd 100644 (file)
@@ -45,8 +45,6 @@ struct k_itimer {
        int it_requeue_pending;         /* waiting to requeue this timer */
 #define REQUEUE_PENDING 1
        int it_sigev_notify;            /* notify word of sigevent struct */
-       int it_sigev_signo;             /* signo word of sigevent struct */
-       sigval_t it_sigev_value;        /* value word of sigevent struct */
        struct task_struct *it_process; /* process to send signal to */
        struct sigqueue *sigq;          /* signal queue entry. */
        union {
@@ -115,4 +113,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
+void update_rlimit_cpu(unsigned long rlim_new);
+
 #endif
index f52dbd3587a78c4c031759f6934b5d03685b666a..5c38db536e07f8b7b0d6e5b0dc4f31a9e3cb07c1 100644 (file)
@@ -434,6 +434,39 @@ struct pacct_struct {
        unsigned long           ac_minflt, ac_majflt;
 };
 
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime:             time spent in user mode, in &cputime_t units
+ * @stime:             time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime:  total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups.  Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+       cputime_t utime;
+       cputime_t stime;
+       unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp       stime
+#define virt_exp       utime
+#define sched_exp      sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals:            thread group interval timers; substructure for
+ *                     uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+       struct task_cputime *totals;
+};
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -479,6 +512,17 @@ struct signal_struct {
        cputime_t it_prof_expires, it_virt_expires;
        cputime_t it_prof_incr, it_virt_incr;
 
+       /*
+        * Thread group totals for process CPU clocks.
+        * See thread_group_cputime(), et al, for details.
+        */
+       struct thread_group_cputime cputime;
+
+       /* Earliest-expiration cache. */
+       struct task_cputime cputime_expires;
+
+       struct list_head cpu_timers[3];
+
        /* job control IDs */
 
        /*
@@ -509,7 +553,7 @@ struct signal_struct {
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
-       cputime_t utime, stime, cutime, cstime;
+       cputime_t cutime, cstime;
        cputime_t gtime;
        cputime_t cgtime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -517,14 +561,6 @@ struct signal_struct {
        unsigned long inblock, oublock, cinblock, coublock;
        struct task_io_accounting ioac;
 
-       /*
-        * Cumulative ns of scheduled CPU time for dead threads in the
-        * group, not including a zombie group leader.  (This only differs
-        * from jiffies_to_ns(utime + stime) if sched_clock uses something
-        * other than jiffies.)
-        */
-       unsigned long long sum_sched_runtime;
-
        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
@@ -536,8 +572,6 @@ struct signal_struct {
         */
        struct rlimit rlim[RLIM_NLIMITS];
 
-       struct list_head cpu_timers[3];
-
        /* keep the process-shared keyrings here so that they do the right
         * thing in threads created with CLONE_THREAD */
 #ifdef CONFIG_KEYS
@@ -1146,8 +1180,7 @@ struct task_struct {
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;
 
-       cputime_t it_prof_expires, it_virt_expires;
-       unsigned long long it_sched_expires;
+       struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
 
 /* process credentials */
@@ -1597,6 +1630,7 @@ extern unsigned long long cpu_clock(int cpu);
 
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -2093,6 +2127,30 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
+/*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+       sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+       if (curr->signal->cputime.totals)
+               return 0;
+       return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+       free_percpu(sig->cputime.totals);
+}
+
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
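Aside, not part of the patch: the accounting pattern these sched.h additions introduce — per-CPU task_cputime totals that are only summed when someone reads the group clock — is what removes the per-tick walk over every thread in the group. A standalone userspace sketch of the idea, with a plain array standing in for the per-CPU allocation:

#include <stdio.h>

#define NR_CPUS 4

struct task_cputime {
	unsigned long long utime;            /* user time (arbitrary units here) */
	unsigned long long stime;            /* system time */
	unsigned long long sum_exec_runtime; /* scheduled time */
};

/* Stand-in for the per-CPU totals hanging off signal->cputime.totals. */
static struct task_cputime totals[NR_CPUS];

/* Hot path: each CPU bumps only its own slot; no walk over the thread group. */
static void account(int cpu, unsigned long long u, unsigned long long s)
{
	totals[cpu].utime += u;
	totals[cpu].stime += s;
	totals[cpu].sum_exec_runtime += u + s;
}

/* Slow path: readers (timers, wait4, /proc) sum the slots on demand. */
static struct task_cputime group_cputime(void)
{
	struct task_cputime sum = { 0, 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		sum.utime += totals[cpu].utime;
		sum.stime += totals[cpu].stime;
		sum.sum_exec_runtime += totals[cpu].sum_exec_runtime;
	}
	return sum;
}

int main(void)
{
	struct task_cputime t;

	account(0, 5, 1);
	account(2, 3, 2);
	t = group_cputime();
	printf("utime=%llu stime=%llu runtime=%llu\n",
	       t.utime, t.stime, t.sum_exec_runtime);
	return 0;
}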
index 98921a3e1aa8db33b7e743e6711abb671485ea12..b6ec8189ac0c16673689f5327cde1d78b3ab49d6 100644 (file)
@@ -96,9 +96,11 @@ extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
+extern void tick_check_idle(int cpu);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 # endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -106,26 +108,23 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
 extern void tick_nohz_stop_sched_tick(int inidle);
 extern void tick_nohz_restart_sched_tick(void);
-extern void tick_nohz_update_jiffies(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
-extern void tick_nohz_stop_idle(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 # else
 static inline void tick_nohz_stop_sched_tick(int inidle) { }
 static inline void tick_nohz_restart_sched_tick(void) { }
-static inline void tick_nohz_update_jiffies(void) { }
 static inline ktime_t tick_nohz_get_sleep_length(void)
 {
        ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
 
        return len;
 }
-static inline void tick_nohz_stop_idle(int cpu) { }
 static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
index 51e883df0fa51fe598832747477533fc4303e30a..4f1c9db577079ed3cc688def99e476219b5aa9b0 100644 (file)
@@ -119,6 +119,7 @@ extern int do_setitimer(int which, struct itimerval *value,
 extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
+extern void getrawmonotonic(struct timespec *ts);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
@@ -127,6 +128,9 @@ extern int timekeeping_valid_for_hres(void);
 extern void update_wall_time(void);
 extern void update_xtime_cache(u64 nsec);
 
+struct tms;
+extern void do_sys_times(struct tms *);
+
 /**
  * timespec_to_ns - Convert timespec to nanoseconds
  * @ts:                pointer to the timespec variable to be converted
@@ -216,6 +220,7 @@ struct itimerval {
 #define CLOCK_MONOTONIC                        1
 #define CLOCK_PROCESS_CPUTIME_ID       2
 #define CLOCK_THREAD_CPUTIME_ID                3
+#define CLOCK_MONOTONIC_RAW            4
 
 /*
  * The IDs of various hardware clocks:
index fc6035d29d568a018e5c43f6575e5b860379fa96..9007313b5b7168ae03ed85e63d80ce6afe0ce438 100644 (file)
@@ -82,7 +82,7 @@
  */
 #define SHIFT_USEC 16          /* frequency offset scale (shift) */
 #define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 20
+#define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
                       PPM_SCALE + 1)
 
@@ -141,8 +141,15 @@ struct timex {
 #define ADJ_MICRO              0x1000  /* select microsecond resolution */
 #define ADJ_NANO               0x2000  /* select nanosecond resolution */
 #define ADJ_TICK               0x4000  /* tick value */
+
+#ifdef __KERNEL__
+#define ADJ_ADJTIME            0x8000  /* switch between adjtime/adjtimex modes */
+#define ADJ_OFFSET_SINGLESHOT  0x0001  /* old-fashioned adjtime */
+#define ADJ_OFFSET_READONLY    0x2000  /* read-only adjtime */
+#else
 #define ADJ_OFFSET_SINGLESHOT  0x8001  /* old-fashioned adjtime */
-#define ADJ_OFFSET_SS_READ     0xa001  /* read-only adjtime */
+#define ADJ_OFFSET_SS_READ     0xa001  /* read-only adjtime */
+#endif
 
 /* xntp 3.4 compatibility names */
 #define MOD_OFFSET     ADJ_OFFSET
index 143990e48cb9aab2af2f04d93a372d63dc570882..8eafe3eb50d9feb76dce5b289a4a7278321e4d8a 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/timex.h>
 #include <linux/migrate.h>
 #include <linux/posix-timers.h>
+#include <linux/times.h>
 
 #include <asm/uaccess.h>
 
@@ -208,49 +209,23 @@ asmlinkage long compat_sys_setitimer(int which,
        return 0;
 }
 
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+       return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
 asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
 {
-       /*
-        *      In the SMP world we might just be unlucky and have one of
-        *      the times increment as we use it. Since the value is an
-        *      atomically safe type this is just fine. Conceptually its
-        *      as if the syscall took an instant longer to occur.
-        */
        if (tbuf) {
+               struct tms tms;
                struct compat_tms tmp;
-               struct task_struct *tsk = current;
-               struct task_struct *t;
-               cputime_t utime, stime, cutime, cstime;
-
-               read_lock(&tasklist_lock);
-               utime = tsk->signal->utime;
-               stime = tsk->signal->stime;
-               t = tsk;
-               do {
-                       utime = cputime_add(utime, t->utime);
-                       stime = cputime_add(stime, t->stime);
-                       t = next_thread(t);
-               } while (t != tsk);
-
-               /*
-                * While we have tasklist_lock read-locked, no dying thread
-                * can be updating current->signal->[us]time.  Instead,
-                * we got their counts included in the live thread loop.
-                * However, another thread can come in right now and
-                * do a wait call that updates current->signal->c[us]time.
-                * To make sure we always see that pair updated atomically,
-                * we take the siglock around fetching them.
-                */
-               spin_lock_irq(&tsk->sighand->siglock);
-               cutime = tsk->signal->cutime;
-               cstime = tsk->signal->cstime;
-               spin_unlock_irq(&tsk->sighand->siglock);
-               read_unlock(&tasklist_lock);
-
-               tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
-               tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
-               tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
-               tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
+
+               do_sys_times(&tms);
+               /* Convert our struct tms to the compat version. */
+               tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+               tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+               tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+               tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
                        return -EFAULT;
        }
index 0ef4673e351bddd4e738f88941511efa02f062fa..059b38cae3848ea62fc7f03fb5a08c3dfbb53f10 100644 (file)
@@ -112,8 +112,6 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime = cputime_add(sig->utime, task_utime(tsk));
-               sig->stime = cputime_add(sig->stime, task_stime(tsk));
                sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
@@ -122,7 +120,6 @@ static void __exit_signal(struct task_struct *tsk)
                sig->inblock += task_io_get_inblock(tsk);
                sig->oublock += task_io_get_oublock(tsk);
                task_io_accounting_add(&sig->ioac, &tsk->ioac);
-               sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
                sig = NULL; /* Marker for below. */
        }
 
@@ -1301,6 +1298,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
        if (likely(!traced)) {
                struct signal_struct *psig;
                struct signal_struct *sig;
+               struct task_cputime cputime;
 
                /*
                 * The resource counters for the group leader are in its
@@ -1316,20 +1314,23 @@ static int wait_task_zombie(struct task_struct *p, int options,
                 * need to protect the access to p->parent->signal fields,
                 * as other threads in the parent group can be right
                 * here reaping other children at the same time.
+                *
+                * We use thread_group_cputime() to get times for the thread
+                * group, which consolidates times for all threads in the
+                * group including the group leader.
                 */
                spin_lock_irq(&p->parent->sighand->siglock);
                psig = p->parent->signal;
                sig = p->signal;
+               thread_group_cputime(p, &cputime);
                psig->cutime =
                        cputime_add(psig->cutime,
-                       cputime_add(p->utime,
-                       cputime_add(sig->utime,
-                                   sig->cutime)));
+                       cputime_add(cputime.utime,
+                                   sig->cutime));
                psig->cstime =
                        cputime_add(psig->cstime,
-                       cputime_add(p->stime,
-                       cputime_add(sig->stime,
-                                   sig->cstime)));
+                       cputime_add(cputime.stime,
+                                   sig->cstime));
                psig->cgtime =
                        cputime_add(psig->cgtime,
                        cputime_add(p->gtime,
index 30de644a40c4d4d9617d650589f4c90da1e977a2..44e64d7ba29b8ecba78bc093ae18b8c0325e4047 100644 (file)
@@ -759,15 +759,44 @@ void __cleanup_sighand(struct sighand_struct *sighand)
                kmem_cache_free(sighand_cachep, sighand);
 }
 
+
+/*
+ * Initialize POSIX timer handling for a thread group.
+ */
+static void posix_cpu_timers_init_group(struct signal_struct *sig)
+{
+       /* Thread group counters. */
+       thread_group_cputime_init(sig);
+
+       /* Expiration times and increments. */
+       sig->it_virt_expires = cputime_zero;
+       sig->it_virt_incr = cputime_zero;
+       sig->it_prof_expires = cputime_zero;
+       sig->it_prof_incr = cputime_zero;
+
+       /* Cached expiration times. */
+       sig->cputime_expires.prof_exp = cputime_zero;
+       sig->cputime_expires.virt_exp = cputime_zero;
+       sig->cputime_expires.sched_exp = 0;
+
+       /* The timer lists. */
+       INIT_LIST_HEAD(&sig->cpu_timers[0]);
+       INIT_LIST_HEAD(&sig->cpu_timers[1]);
+       INIT_LIST_HEAD(&sig->cpu_timers[2]);
+}
+
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
        struct signal_struct *sig;
        int ret;
 
        if (clone_flags & CLONE_THREAD) {
-               atomic_inc(&current->signal->count);
-               atomic_inc(&current->signal->live);
-               return 0;
+               ret = thread_group_cputime_clone_thread(current);
+               if (likely(!ret)) {
+                       atomic_inc(&current->signal->count);
+                       atomic_inc(&current->signal->live);
+               }
+               return ret;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
@@ -795,40 +824,25 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
 
-       sig->it_virt_expires = cputime_zero;
-       sig->it_virt_incr = cputime_zero;
-       sig->it_prof_expires = cputime_zero;
-       sig->it_prof_incr = cputime_zero;
-
        sig->leader = 0;        /* session leadership doesn't inherit */
        sig->tty_old_pgrp = NULL;
        sig->tty = NULL;
 
-       sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
+       sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
        task_io_accounting_init(&sig->ioac);
-       sig->sum_sched_runtime = 0;
-       INIT_LIST_HEAD(&sig->cpu_timers[0]);
-       INIT_LIST_HEAD(&sig->cpu_timers[1]);
-       INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);
 
        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);
 
-       if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-               /*
-                * New sole thread in the process gets an expiry time
-                * of the whole CPU time limit.
-                */
-               tsk->it_prof_expires =
-                       secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
-       }
+       posix_cpu_timers_init_group(sig);
+
        acct_init_pacct(&sig->pacct);
 
        tty_audit_fork(sig);
@@ -838,6 +852,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_signal(struct signal_struct *sig)
 {
+       thread_group_cputime_free(sig);
        exit_thread_group_keys(sig);
        tty_kref_put(sig->tty);
        kmem_cache_free(signal_cachep, sig);
@@ -887,6 +902,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+/*
+ * Initialize POSIX timer handling for a single task.
+ */
+static void posix_cpu_timers_init(struct task_struct *tsk)
+{
+       tsk->cputime_expires.prof_exp = cputime_zero;
+       tsk->cputime_expires.virt_exp = cputime_zero;
+       tsk->cputime_expires.sched_exp = 0;
+       INIT_LIST_HEAD(&tsk->cpu_timers[0]);
+       INIT_LIST_HEAD(&tsk->cpu_timers[1]);
+       INIT_LIST_HEAD(&tsk->cpu_timers[2]);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -997,12 +1025,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        task_io_accounting_init(&p->ioac);
        acct_clear_integrals(p);
 
-       p->it_virt_expires = cputime_zero;
-       p->it_prof_expires = cputime_zero;
-       p->it_sched_expires = 0;
-       INIT_LIST_HEAD(&p->cpu_timers[0]);
-       INIT_LIST_HEAD(&p->cpu_timers[1]);
-       INIT_LIST_HEAD(&p->cpu_timers[2]);
+       posix_cpu_timers_init(p);
 
        p->lock_depth = -1;             /* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1203,21 +1226,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-
-               if (!cputime_eq(current->signal->it_virt_expires,
-                               cputime_zero) ||
-                   !cputime_eq(current->signal->it_prof_expires,
-                               cputime_zero) ||
-                   current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
-                   !list_empty(&current->signal->cpu_timers[0]) ||
-                   !list_empty(&current->signal->cpu_timers[1]) ||
-                   !list_empty(&current->signal->cpu_timers[2])) {
-                       /*
-                        * Have child wake up on its first tick to check
-                        * for process CPU timers.
-                        */
-                       p->it_prof_expires = jiffies_to_cputime(1);
-               }
        }
 
        if (likely(p->pid)) {
index cdec83e722fa1b80ee0af0f828d8e47532431a20..95978f48e039fcbd7e7e233224a7e3f4a0b2f884 100644 (file)
@@ -1403,9 +1403,7 @@ void hrtimer_run_queues(void)
                if (!base->first)
                        continue;
 
-               if (base->get_softirq_time)
-                       base->softirq_time = base->get_softirq_time();
-               else if (gettime) {
+               if (gettime) {
                        hrtimer_get_softirq_time(cpu_base);
                        gettime = 0;
                }
@@ -1688,9 +1686,11 @@ static void migrate_hrtimers(int cpu)
        new_base = &get_cpu_var(hrtimer_bases);
 
        tick_cancel_sched_timer(cpu);
-
-       local_irq_disable();
-       spin_lock(&new_base->lock);
+       /*
+        * The caller is globally serialized and nobody else
+        * takes two locks at once, so deadlock is not possible.
+        */
+       spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1703,8 +1703,7 @@ static void migrate_hrtimers(int cpu)
                raise = 1;
 
        spin_unlock(&old_base->lock);
-       spin_unlock(&new_base->lock);
-       local_irq_enable();
+       spin_unlock_irq(&new_base->lock);
        put_cpu_var(hrtimer_bases);
 
        if (raise)
index ab982747d9bd8121c19b3327b96c7760adb7577e..db7c358b9a02f1cc50954517124c8272b5eed463 100644 (file)
@@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value)
                spin_unlock_irq(&tsk->sighand->siglock);
                break;
        case ITIMER_VIRTUAL:
-               read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                cval = tsk->signal->it_virt_expires;
                cinterval = tsk->signal->it_virt_incr;
                if (!cputime_eq(cval, cputime_zero)) {
-                       struct task_struct *t = tsk;
-                       cputime_t utime = tsk->signal->utime;
-                       do {
-                               utime = cputime_add(utime, t->utime);
-                               t = next_thread(t);
-                       } while (t != tsk);
+                       struct task_cputime cputime;
+                       cputime_t utime;
+
+                       thread_group_cputime(tsk, &cputime);
+                       utime = cputime.utime;
                        if (cputime_le(cval, utime)) { /* about to fire */
                                cval = jiffies_to_cputime(1);
                        } else {
@@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value)
                        }
                }
                spin_unlock_irq(&tsk->sighand->siglock);
-               read_unlock(&tasklist_lock);
                cputime_to_timeval(cval, &value->it_value);
                cputime_to_timeval(cinterval, &value->it_interval);
                break;
        case ITIMER_PROF:
-               read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                cval = tsk->signal->it_prof_expires;
                cinterval = tsk->signal->it_prof_incr;
                if (!cputime_eq(cval, cputime_zero)) {
-                       struct task_struct *t = tsk;
-                       cputime_t ptime = cputime_add(tsk->signal->utime,
-                                                     tsk->signal->stime);
-                       do {
-                               ptime = cputime_add(ptime,
-                                                   cputime_add(t->utime,
-                                                               t->stime));
-                               t = next_thread(t);
-                       } while (t != tsk);
+                       struct task_cputime times;
+                       cputime_t ptime;
+
+                       thread_group_cputime(tsk, &times);
+                       ptime = cputime_add(times.utime, times.stime);
                        if (cputime_le(cval, ptime)) { /* about to fire */
                                cval = jiffies_to_cputime(1);
                        } else {
@@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value)
                        }
                }
                spin_unlock_irq(&tsk->sighand->siglock);
-               read_unlock(&tasklist_lock);
                cputime_to_timeval(cval, &value->it_value);
                cputime_to_timeval(cinterval, &value->it_interval);
                break;
@@ -185,7 +176,6 @@ again:
        case ITIMER_VIRTUAL:
                nval = timeval_to_cputime(&value->it_value);
                ninterval = timeval_to_cputime(&value->it_interval);
-               read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                cval = tsk->signal->it_virt_expires;
                cinterval = tsk->signal->it_virt_incr;
@@ -200,7 +190,6 @@ again:
                tsk->signal->it_virt_expires = nval;
                tsk->signal->it_virt_incr = ninterval;
                spin_unlock_irq(&tsk->sighand->siglock);
-               read_unlock(&tasklist_lock);
                if (ovalue) {
                        cputime_to_timeval(cval, &ovalue->it_value);
                        cputime_to_timeval(cinterval, &ovalue->it_interval);
@@ -209,7 +198,6 @@ again:
        case ITIMER_PROF:
                nval = timeval_to_cputime(&value->it_value);
                ninterval = timeval_to_cputime(&value->it_interval);
-               read_lock(&tasklist_lock);
                spin_lock_irq(&tsk->sighand->siglock);
                cval = tsk->signal->it_prof_expires;
                cinterval = tsk->signal->it_prof_incr;
@@ -224,7 +212,6 @@ again:
                tsk->signal->it_prof_expires = nval;
                tsk->signal->it_prof_incr = ninterval;
                spin_unlock_irq(&tsk->sighand->siglock);
-               read_unlock(&tasklist_lock);
                if (ovalue) {
                        cputime_to_timeval(cval, &ovalue->it_value);
                        cputime_to_timeval(cinterval, &ovalue->it_interval);
index c42a03aef36f07fd326eba959a90aa7bbe45dbd8..153dcb2639c3df614c7862041a9d485b7dbe6614 100644 (file)
@@ -7,6 +7,93 @@
 #include <linux/errno.h>
 #include <linux/math64.h>
 #include <asm/uaccess.h>
+#include <linux/kernel_stat.h>
+
+/*
+ * Allocate the thread_group_cputime structure appropriately and fill in the
+ * current values of the fields.  Called from copy_signal() via
+ * thread_group_cputime_clone_thread() when adding a second or subsequent
+ * thread to a thread group.  Assumes interrupts are enabled when called.
+ */
+int thread_group_cputime_alloc(struct task_struct *tsk)
+{
+       struct signal_struct *sig = tsk->signal;
+       struct task_cputime *cputime;
+
+       /*
+        * If we have multiple threads and we don't already have a
+        * per-CPU task_cputime struct (checked in the caller), allocate
+        * one and fill it in with the times accumulated so far.  We may
+        * race with another thread so recheck after we pick up the sighand
+        * lock.
+        */
+       cputime = alloc_percpu(struct task_cputime);
+       if (cputime == NULL)
+               return -ENOMEM;
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (sig->cputime.totals) {
+               spin_unlock_irq(&tsk->sighand->siglock);
+               free_percpu(cputime);
+               return 0;
+       }
+       sig->cputime.totals = cputime;
+       cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
+       cputime->utime = tsk->utime;
+       cputime->stime = tsk->stime;
+       cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
+       spin_unlock_irq(&tsk->sighand->siglock);
+       return 0;
+}
+
+/**
+ * thread_group_cputime - Sum the thread group time fields across all CPUs.
+ *
+ * @tsk:       The task we use to identify the thread group.
+ * @times:     task_cputime structure in which we return the summed fields.
+ *
+ * Walk the list of CPUs to sum the per-CPU time fields in the thread group
+ * time structure.
+ */
+void thread_group_cputime(
+       struct task_struct *tsk,
+       struct task_cputime *times)
+{
+       struct signal_struct *sig;
+       int i;
+       struct task_cputime *tot;
+
+       sig = tsk->signal;
+       if (unlikely(!sig) || !sig->cputime.totals) {
+               times->utime = tsk->utime;
+               times->stime = tsk->stime;
+               times->sum_exec_runtime = tsk->se.sum_exec_runtime;
+               return;
+       }
+       times->stime = times->utime = cputime_zero;
+       times->sum_exec_runtime = 0;
+       for_each_possible_cpu(i) {
+               tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+               times->utime = cputime_add(times->utime, tot->utime);
+               times->stime = cputime_add(times->stime, tot->stime);
+               times->sum_exec_runtime += tot->sum_exec_runtime;
+       }
+}
+
+/*
+ * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ */
+void update_rlimit_cpu(unsigned long rlim_new)
+{
+       cputime_t cputime;
+
+       cputime = secs_to_cputime(rlim_new);
+       if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+           cputime_lt(current->signal->it_prof_expires, cputime)) {
+               spin_lock_irq(&current->sighand->siglock);
+               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+               spin_unlock_irq(&current->sighand->siglock);
+       }
+}
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -158,10 +245,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)
 {
        return p->utime;
 }
-static inline unsigned long long sched_ns(struct task_struct *p)
-{
-       return task_sched_runtime(p);
-}
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
@@ -211,7 +294,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = sched_ns(p);
+               cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
@@ -220,59 +303,30 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
- * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
  */
-static int cpu_clock_sample_group_locked(unsigned int clock_idx,
-                                        struct task_struct *p,
-                                        union cpu_time_count *cpu)
+static int cpu_clock_sample_group(const clockid_t which_clock,
+                                 struct task_struct *p,
+                                 union cpu_time_count *cpu)
 {
-       struct task_struct *t = p;
-       switch (clock_idx) {
+       struct task_cputime cputime;
+
+       thread_group_cputime(p, &cputime);
+       switch (which_clock) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
-               cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
-               do {
-                       cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
-                       t = next_thread(t);
-               } while (t != p);
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
-               cpu->cpu = p->signal->utime;
-               do {
-                       cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
-                       t = next_thread(t);
-               } while (t != p);
+               cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
-               cpu->sched = p->signal->sum_sched_runtime;
-               /* Add in each other live thread.  */
-               while ((t = next_thread(t)) != p) {
-                       cpu->sched += t->se.sum_exec_runtime;
-               }
-               cpu->sched += sched_ns(p);
+               cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
 }
 
-/*
- * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_clock_sample_group(const clockid_t which_clock,
-                                 struct task_struct *p,
-                                 union cpu_time_count *cpu)
-{
-       int ret;
-       unsigned long flags;
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-       ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
-                                           cpu);
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       return ret;
-}
-
 
 int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
@@ -471,80 +525,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-       cleanup_timers(tsk->signal->cpu_timers,
-                      cputime_add(tsk->utime, tsk->signal->utime),
-                      cputime_add(tsk->stime, tsk->signal->stime),
-                    tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
-}
+       struct task_cputime cputime;
 
-
-/*
- * Set the expiry times of all the threads in the process so one of them
- * will go off before the process cumulative expiry total is reached.
- */
-static void process_timer_rebalance(struct task_struct *p,
-                                   unsigned int clock_idx,
-                                   union cpu_time_count expires,
-                                   union cpu_time_count val)
-{
-       cputime_t ticks, left;
-       unsigned long long ns, nsleft;
-       struct task_struct *t = p;
-       unsigned int nthreads = atomic_read(&p->signal->live);
-
-       if (!nthreads)
-               return;
-
-       switch (clock_idx) {
-       default:
-               BUG();
-               break;
-       case CPUCLOCK_PROF:
-               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-                                      nthreads);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ticks = cputime_add(prof_ticks(t), left);
-                               if (cputime_eq(t->it_prof_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(t->it_prof_expires, ticks)) {
-                                       t->it_prof_expires = ticks;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       case CPUCLOCK_VIRT:
-               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-                                      nthreads);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ticks = cputime_add(virt_ticks(t), left);
-                               if (cputime_eq(t->it_virt_expires,
-                                              cputime_zero) ||
-                                   cputime_gt(t->it_virt_expires, ticks)) {
-                                       t->it_virt_expires = ticks;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       case CPUCLOCK_SCHED:
-               nsleft = expires.sched - val.sched;
-               do_div(nsleft, nthreads);
-               nsleft = max_t(unsigned long long, nsleft, 1);
-               do {
-                       if (likely(!(t->flags & PF_EXITING))) {
-                               ns = t->se.sum_exec_runtime + nsleft;
-                               if (t->it_sched_expires == 0 ||
-                                   t->it_sched_expires > ns) {
-                                       t->it_sched_expires = ns;
-                               }
-                       }
-                       t = next_thread(t);
-               } while (t != p);
-               break;
-       }
+       thread_group_cputime(tsk, &cputime);
+       cleanup_timers(tsk->signal->cpu_timers,
+                      cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -608,29 +593,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
-                               if (cputime_eq(p->it_prof_expires,
+                               if (cputime_eq(p->cputime_expires.prof_exp,
                                               cputime_zero) ||
-                                   cputime_gt(p->it_prof_expires,
+                                   cputime_gt(p->cputime_expires.prof_exp,
                                               nt->expires.cpu))
-                                       p->it_prof_expires = nt->expires.cpu;
+                                       p->cputime_expires.prof_exp =
+                                               nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
-                               if (cputime_eq(p->it_virt_expires,
+                               if (cputime_eq(p->cputime_expires.virt_exp,
                                               cputime_zero) ||
-                                   cputime_gt(p->it_virt_expires,
+                                   cputime_gt(p->cputime_expires.virt_exp,
                                               nt->expires.cpu))
-                                       p->it_virt_expires = nt->expires.cpu;
+                                       p->cputime_expires.virt_exp =
+                                               nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
-                               if (p->it_sched_expires == 0 ||
-                                   p->it_sched_expires > nt->expires.sched)
-                                       p->it_sched_expires = nt->expires.sched;
+                               if (p->cputime_expires.sched_exp == 0 ||
+                                   p->cputime_expires.sched_exp >
+                                                       nt->expires.sched)
+                                       p->cputime_expires.sched_exp =
+                                               nt->expires.sched;
                                break;
                        }
                } else {
                        /*
-                        * For a process timer, we must balance
-                        * all the live threads' expirations.
+                        * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
@@ -641,7 +629,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
-                               goto rebalance;
+                               p->signal->cputime_expires.virt_exp =
+                                       timer->it.cpu.expires.cpu;
+                               break;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
@@ -652,13 +642,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
-                               goto rebalance;
+                               p->signal->cputime_expires.prof_exp =
+                                       timer->it.cpu.expires.cpu;
+                               break;
                        case CPUCLOCK_SCHED:
-                       rebalance:
-                               process_timer_rebalance(
-                                       timer->it.cpu.task,
-                                       CPUCLOCK_WHICH(timer->it_clock),
-                                       timer->it.cpu.expires, now);
+                               p->signal->cputime_expires.sched_exp =
+                                       timer->it.cpu.expires.sched;
                                break;
                        }
                }
@@ -969,13 +958,13 @@ static void check_thread_timers(struct task_struct *tsk,
        struct signal_struct *const sig = tsk->signal;
 
        maxfire = 20;
-       tsk->it_prof_expires = cputime_zero;
+       tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
-                       tsk->it_prof_expires = t->expires.cpu;
+                       tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
@@ -984,13 +973,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
        ++timers;
        maxfire = 20;
-       tsk->it_virt_expires = cputime_zero;
+       tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
-                       tsk->it_virt_expires = t->expires.cpu;
+                       tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
@@ -999,13 +988,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
        ++timers;
        maxfire = 20;
-       tsk->it_sched_expires = 0;
+       tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
-                       tsk->it_sched_expires = t->expires.sched;
+                       tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
@@ -1055,10 +1044,10 @@ static void check_process_timers(struct task_struct *tsk,
 {
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
-       cputime_t utime, stime, ptime, virt_expires, prof_expires;
+       cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
-       struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;
+       struct task_cputime cputime;
 
        /*
         * Don't sample the current process CPU clocks if there are no timers.
@@ -1074,18 +1063,10 @@ static void check_process_timers(struct task_struct *tsk,
        /*
         * Collect the current process totals.
         */
-       utime = sig->utime;
-       stime = sig->stime;
-       sum_sched_runtime = sig->sum_sched_runtime;
-       t = tsk;
-       do {
-               utime = cputime_add(utime, t->utime);
-               stime = cputime_add(stime, t->stime);
-               sum_sched_runtime += t->se.sum_exec_runtime;
-               t = next_thread(t);
-       } while (t != tsk);
-       ptime = cputime_add(utime, stime);
-
+       thread_group_cputime(tsk, &cputime);
+       utime = cputime.utime;
+       ptime = cputime_add(utime, cputime.stime);
+       sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
@@ -1193,60 +1174,18 @@ static void check_process_timers(struct task_struct *tsk,
                }
        }
 
-       if (!cputime_eq(prof_expires, cputime_zero) ||
-           !cputime_eq(virt_expires, cputime_zero) ||
-           sched_expires != 0) {
-               /*
-                * Rebalance the threads' expiry times for the remaining
-                * process CPU timers.
-                */
-
-               cputime_t prof_left, virt_left, ticks;
-               unsigned long long sched_left, sched;
-               const unsigned int nthreads = atomic_read(&sig->live);
-
-               if (!nthreads)
-                       return;
-
-               prof_left = cputime_sub(prof_expires, utime);
-               prof_left = cputime_sub(prof_left, stime);
-               prof_left = cputime_div_non_zero(prof_left, nthreads);
-               virt_left = cputime_sub(virt_expires, utime);
-               virt_left = cputime_div_non_zero(virt_left, nthreads);
-               if (sched_expires) {
-                       sched_left = sched_expires - sum_sched_runtime;
-                       do_div(sched_left, nthreads);
-                       sched_left = max_t(unsigned long long, sched_left, 1);
-               } else {
-                       sched_left = 0;
-               }
-               t = tsk;
-               do {
-                       if (unlikely(t->flags & PF_EXITING))
-                               continue;
-
-                       ticks = cputime_add(cputime_add(t->utime, t->stime),
-                                           prof_left);
-                       if (!cputime_eq(prof_expires, cputime_zero) &&
-                           (cputime_eq(t->it_prof_expires, cputime_zero) ||
-                            cputime_gt(t->it_prof_expires, ticks))) {
-                               t->it_prof_expires = ticks;
-                       }
-
-                       ticks = cputime_add(t->utime, virt_left);
-                       if (!cputime_eq(virt_expires, cputime_zero) &&
-                           (cputime_eq(t->it_virt_expires, cputime_zero) ||
-                            cputime_gt(t->it_virt_expires, ticks))) {
-                               t->it_virt_expires = ticks;
-                       }
-
-                       sched = t->se.sum_exec_runtime + sched_left;
-                       if (sched_expires && (t->it_sched_expires == 0 ||
-                                             t->it_sched_expires > sched)) {
-                               t->it_sched_expires = sched;
-                       }
-               } while ((t = next_thread(t)) != tsk);
-       }
+       if (!cputime_eq(prof_expires, cputime_zero) &&
+           (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
+            cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
+               sig->cputime_expires.prof_exp = prof_expires;
+       if (!cputime_eq(virt_expires, cputime_zero) &&
+           (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
+            cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
+               sig->cputime_expires.virt_exp = virt_expires;
+       if (sched_expires != 0 &&
+           (sig->cputime_expires.sched_exp == 0 ||
+            sig->cputime_expires.sched_exp > sched_expires))
+               sig->cputime_expires.sched_exp = sched_expires;
 }
 
 /*
@@ -1314,6 +1253,86 @@ out:
        ++timer->it_requeue_pending;
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:   The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+       if (cputime_eq(cputime->utime, cputime_zero) &&
+           cputime_eq(cputime->stime, cputime_zero) &&
+           cputime->sum_exec_runtime == 0)
+               return 1;
+       return 0;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample:    The task_cputime structure to be checked for expiration.
+ * @expires:   Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+                                       const struct task_cputime *expires)
+{
+       if (!cputime_eq(expires->utime, cputime_zero) &&
+           cputime_ge(sample->utime, expires->utime))
+               return 1;
+       if (!cputime_eq(expires->stime, cputime_zero) &&
+           cputime_ge(cputime_add(sample->utime, sample->stime),
+                      expires->stime))
+               return 1;
+       if (expires->sum_exec_runtime != 0 &&
+           sample->sum_exec_runtime >= expires->sum_exec_runtime)
+               return 1;
+       return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk:       The task (thread) being checked.
+ *
+ * Check the task and thread group timers.  If both are zero (there are no
+ * timers set) return false.  Otherwise snapshot the task and thread group
+ * timers and compare them with the corresponding expiration times.  Return
+ * true if a timer has expired, else return false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk)
+{
+       struct signal_struct *sig = tsk->signal;
+
+       if (unlikely(!sig))
+               return 0;
+
+       if (!task_cputime_zero(&tsk->cputime_expires)) {
+               struct task_cputime task_sample = {
+                       .utime = tsk->utime,
+                       .stime = tsk->stime,
+                       .sum_exec_runtime = tsk->se.sum_exec_runtime
+               };
+
+               if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+                       return 1;
+       }
+       if (!task_cputime_zero(&sig->cputime_expires)) {
+               struct task_cputime group_sample;
+
+               thread_group_cputime(tsk, &group_sample);
+               if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+                       return 1;
+       }
+       return 0;
+}
+
 /*
  * This is called from the timer interrupt handler.  The irq handler has
  * already updated our counts.  We need to check if any timers fire now.
@@ -1326,42 +1345,31 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
        BUG_ON(!irqs_disabled());
 
-#define UNEXPIRED(clock) \
-               (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
-                cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
-
-       if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
-           (tsk->it_sched_expires == 0 ||
-            tsk->se.sum_exec_runtime < tsk->it_sched_expires))
+       /*
+        * The fast path checks that there are no expired thread or thread
+        * group timers.  If that's so, just return.
+        */
+       if (!fastpath_timer_check(tsk))
                return;
 
-#undef UNEXPIRED
-
+       spin_lock(&tsk->sighand->siglock);
        /*
-        * Double-check with locks held.
+        * Here we take off tsk->signal->cpu_timers[N] and
+        * tsk->cpu_timers[N] all the timers that are firing, and
+        * put them on the firing list.
         */
-       read_lock(&tasklist_lock);
-       if (likely(tsk->signal != NULL)) {
-               spin_lock(&tsk->sighand->siglock);
+       check_thread_timers(tsk, &firing);
+       check_process_timers(tsk, &firing);
 
-               /*
-                * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-                * all the timers that are firing, and put them on the firing list.
-                */
-               check_thread_timers(tsk, &firing);
-               check_process_timers(tsk, &firing);
-
-               /*
-                * We must release these locks before taking any timer's lock.
-                * There is a potential race with timer deletion here, as the
-                * siglock now protects our private firing list.  We have set
-                * the firing flag in each timer, so that a deletion attempt
-                * that gets the timer lock before we do will give it up and
-                * spin until we've taken care of that timer below.
-                */
-               spin_unlock(&tsk->sighand->siglock);
-       }
-       read_unlock(&tasklist_lock);
+       /*
+        * We must release these locks before taking any timer's lock.
+        * There is a potential race with timer deletion here, as the
+        * siglock now protects our private firing list.  We have set
+        * the firing flag in each timer, so that a deletion attempt
+        * that gets the timer lock before we do will give it up and
+        * spin until we've taken care of that timer below.
+        */
+       spin_unlock(&tsk->sighand->siglock);
 
        /*
         * Now that all the timers on our list have the firing flag,
@@ -1389,10 +1397,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 /*
  * Set one of the process-wide special case CPU timers.
- * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
- * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
- * absolute; non-null for ITIMER_*, where *newval is relative and we update
- * it to be absolute, *oldval is absolute and we update it to be relative.
+ * The tsk->sighand->siglock must be held by the caller.
+ * The *newval argument is relative and we update it to be absolute, *oldval
+ * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
@@ -1401,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
        struct list_head *head;
 
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
-       cpu_clock_sample_group_locked(clock_idx, tsk, &now);
+       cpu_clock_sample_group(clock_idx, tsk, &now);
 
        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
@@ -1435,13 +1442,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
-               /*
-                * Rejigger each thread's expiry time so that one will
-                * notice before we hit the process-cumulative expiry time.
-                */
-               union cpu_time_count expires = { .sched = 0 };
-               expires.cpu = *newval;
-               process_timer_rebalance(tsk, clock_idx, expires, now);
+               switch (clock_idx) {
+               case CPUCLOCK_PROF:
+                       tsk->signal->cputime_expires.prof_exp = *newval;
+                       break;
+               case CPUCLOCK_VIRT:
+                       tsk->signal->cputime_expires.virt_exp = *newval;
+                       break;
+               }
        }
 }
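
The fast path added above reduces to the three comparisons documented at
task_cputime_expired(): virtual time against utime, profiling time against
utime + stime, and scheduler runtime against sum_exec_runtime.  A minimal
stand-alone sketch of that expiry test (plain user-space C with invented
names, not kernel code):

#include <stdio.h>

struct cputime_sample {
	unsigned long utime;			/* user time consumed */
	unsigned long stime;			/* system time consumed */
	unsigned long sum_exec_runtime;		/* scheduler runtime */
};

/* A zero field in *limit means "no timer armed" for that clock. */
static int cputime_expired(const struct cputime_sample *s,
			   const struct cputime_sample *limit)
{
	if (limit->utime && s->utime >= limit->utime)
		return 1;			/* virtual (user time) clock hit */
	if (limit->stime && s->utime + s->stime >= limit->stime)
		return 1;			/* profiling (user+system) clock hit */
	if (limit->sum_exec_runtime &&
	    s->sum_exec_runtime >= limit->sum_exec_runtime)
		return 1;			/* scheduler runtime clock hit */
	return 0;
}

int main(void)
{
	struct cputime_sample sample = { 5, 3, 900 };
	struct cputime_sample limit  = { 0, 7, 0 };	/* only the prof clock armed */

	printf("expired: %d\n", cputime_expired(&sample, &limit));
	return 0;
}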
 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e5471169226ef8db42f20792c8ffdac6d12b..b931d7cedbfa9fd70a47d07535d3b791661ec39a 100644 (file)
@@ -222,6 +222,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
        return 0;
 }
 
+/*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+       getrawmonotonic(tp);
+       return 0;
+}
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
                .clock_get = posix_ktime_get_ts,
                .clock_set = do_posix_clock_nosettime,
        };
+       struct k_clock clock_monotonic_raw = {
+               .clock_getres = hrtimer_get_res,
+               .clock_get = posix_get_monotonic_raw,
+               .clock_set = do_posix_clock_nosettime,
+       };
 
        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
        register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+       register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                        sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+       int shared, ret;
        /*
         * FIXME: if ->sigq is queued we can race with
         * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
         */
        timr->sigq->info.si_sys_private = si_private;
 
-       timr->sigq->info.si_signo = timr->it_sigev_signo;
-       timr->sigq->info.si_code = SI_TIMER;
-       timr->sigq->info.si_tid = timr->it_id;
-       timr->sigq->info.si_value = timr->it_sigev_value;
-
-       if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-               struct task_struct *leader;
-               int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-               if (likely(ret >= 0))
-                       return ret;
-
-               timr->it_sigev_notify = SIGEV_SIGNAL;
-               leader = timr->it_process->group_leader;
-               put_task_struct(timr->it_process);
-               timr->it_process = leader;
-       }
-
-       return send_sigqueue(timr->sigq, timr->it_process, 1);
+       shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+       ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+       /* If we failed to send the signal the timer stops. */
+       return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
@@ -468,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
                 struct sigevent __user *timer_event_spec,
                 timer_t __user * created_timer_id)
 {
-       int error = 0;
-       struct k_itimer *new_timer = NULL;
-       int new_timer_id;
-       struct task_struct *process = NULL;
-       unsigned long flags;
+       struct k_itimer *new_timer;
+       int error, new_timer_id;
+       struct task_struct *process;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;
 
@@ -490,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
                goto out;
        }
        spin_lock_irq(&idr_lock);
-       error = idr_get_new(&posix_timers_id, (void *) new_timer,
-                           &new_timer_id);
+       error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
        spin_unlock_irq(&idr_lock);
-       if (error == -EAGAIN)
-               goto retry;
-       else if (error) {
+       if (error) {
+               if (error == -EAGAIN)
+                       goto retry;
                /*
                 * Weird looking, but we return EAGAIN if the IDR is
                 * full (proper POSIX return value for this)
@@ -526,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
                        error = -EFAULT;
                        goto out;
                }
-               new_timer->it_sigev_notify = event.sigev_notify;
-               new_timer->it_sigev_signo = event.sigev_signo;
-               new_timer->it_sigev_value = event.sigev_value;
-
-               read_lock(&tasklist_lock);
-               if ((process = good_sigevent(&event))) {
-                       /*
-                        * We may be setting up this process for another
-                        * thread.  It may be exiting.  To catch this
-                        * case the we check the PF_EXITING flag.  If
-                        * the flag is not set, the siglock will catch
-                        * him before it is too late (in exit_itimers).
-                        *
-                        * The exec case is a bit more invloved but easy
-                        * to code.  If the process is in our thread
-                        * group (and it must be or we would not allow
-                        * it here) and is doing an exec, it will cause
-                        * us to be killed.  In this case it will wait
-                        * for us to die which means we can finish this
-                        * linkage with our last gasp. I.e. no code :)
-                        */
-                       spin_lock_irqsave(&process->sighand->siglock, flags);
-                       if (!(process->flags & PF_EXITING)) {
-                               new_timer->it_process = process;
-                               list_add(&new_timer->list,
-                                        &process->signal->posix_timers);
-                               if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-                                       get_task_struct(process);
-                               spin_unlock_irqrestore(&process->sighand->siglock, flags);
-                       } else {
-                               spin_unlock_irqrestore(&process->sighand->siglock, flags);
-                               process = NULL;
-                       }
-               }
-               read_unlock(&tasklist_lock);
+               rcu_read_lock();
+               process = good_sigevent(&event);
+               if (process)
+                       get_task_struct(process);
+               rcu_read_unlock();
                if (!process) {
                        error = -EINVAL;
                        goto out;
                }
        } else {
-               new_timer->it_sigev_notify = SIGEV_SIGNAL;
-               new_timer->it_sigev_signo = SIGALRM;
-               new_timer->it_sigev_value.sival_int = new_timer->it_id;
+               event.sigev_notify = SIGEV_SIGNAL;
+               event.sigev_signo = SIGALRM;
+               event.sigev_value.sival_int = new_timer->it_id;
                process = current->group_leader;
-               spin_lock_irqsave(&process->sighand->siglock, flags);
-               new_timer->it_process = process;
-               list_add(&new_timer->list, &process->signal->posix_timers);
-               spin_unlock_irqrestore(&process->sighand->siglock, flags);
+               get_task_struct(process);
        }
 
+       new_timer->it_sigev_notify     = event.sigev_notify;
+       new_timer->sigq->info.si_signo = event.sigev_signo;
+       new_timer->sigq->info.si_value = event.sigev_value;
+       new_timer->sigq->info.si_tid   = new_timer->it_id;
+       new_timer->sigq->info.si_code  = SI_TIMER;
+
+       spin_lock_irq(&current->sighand->siglock);
+       new_timer->it_process = process;
+       list_add(&new_timer->list, &current->signal->posix_timers);
+       spin_unlock_irq(&current->sighand->siglock);
+
+       return 0;
        /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time.  Don't use or modify
         * new_timer after the unlock call.
         */
-
 out:
-       if (error)
-               release_posix_timer(new_timer, it_id_set);
-
+       release_posix_timer(new_timer, it_id_set);
        return error;
 }
 
@@ -597,7 +571,7 @@ out:
  * the find to the timer lock.  To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
        struct k_itimer *timr;
        /*
@@ -605,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
         * flags part over to the timer lock.  Must not let interrupts in
         * while we are moving the lock.
         */
-
        spin_lock_irqsave(&idr_lock, *flags);
-       timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+       timr = idr_find(&posix_timers_id, (int)timer_id);
        if (timr) {
                spin_lock(&timr->it_lock);
-
-               if ((timr->it_id != timer_id) || !(timr->it_process) ||
-                               !same_thread_group(timr->it_process, current)) {
-                       spin_unlock(&timr->it_lock);
-                       spin_unlock_irqrestore(&idr_lock, *flags);
-                       timr = NULL;
-               } else
+               if (timr->it_process &&
+                   same_thread_group(timr->it_process, current)) {
                        spin_unlock(&idr_lock);
-       } else
-               spin_unlock_irqrestore(&idr_lock, *flags);
+                       return timr;
+               }
+               spin_unlock(&timr->it_lock);
+       }
+       spin_unlock_irqrestore(&idr_lock, *flags);
 
-       return timr;
+       return NULL;
 }
 
 /*
@@ -862,8 +833,7 @@ retry_delete:
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
-       if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-               put_task_struct(timer->it_process);
+       put_task_struct(timer->it_process);
        timer->it_process = NULL;
 
        unlock_timer(timer, flags);
@@ -890,8 +860,7 @@ retry_delete:
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
-       if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-               put_task_struct(timer->it_process);
+       put_task_struct(timer->it_process);
        timer->it_process = NULL;
 
        unlock_timer(timer, flags);
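
For reference, the default-sigevent path kept above (no sigevent supplied,
so the kernel fills in SIGEV_SIGNAL with SIGALRM and records the timer id
in si_tid/sival_int) is what a plain timer_create(clock, NULL, &id) call
exercises.  A small user-space example, assuming nothing beyond the POSIX
timer API (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	timer_t id;

	signal(SIGALRM, on_alarm);
	/* NULL sigevent: defaults to SIGEV_SIGNAL/SIGALRM, as in the hunk above */
	if (timer_create(CLOCK_MONOTONIC, NULL, &id) != 0) {
		perror("timer_create");
		return 1;
	}
	timer_settime(id, 0, &its, NULL);	/* one shot, fires in one second */
	while (!fired)
		pause();
	printf("timer fired, overruns=%d\n", timer_getoverrun(id));
	timer_delete(id);
	return 0;
}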
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f230596bd0c1d21a2c68ffbff8207e93dcd65b5..09a8c15748f1e5fcd2cae7d98b47ecfd7e73ba40 100644 (file)
@@ -4052,23 +4052,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
  */
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
 {
        unsigned long flags;
-       u64 ns, delta_exec;
        struct rq *rq;
+       u64 ns = 0;
 
        rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime;
+
        if (task_current(rq, p)) {
+               u64 delta_exec;
+
                update_rq_clock(rq);
                delta_exec = rq->clock - p->se.exec_start;
                if ((s64)delta_exec > 0)
-                       ns += delta_exec;
+                       ns = delta_exec;
        }
+
        task_rq_unlock(rq, &flags);
 
        return ns;
@@ -4085,6 +4088,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
        cputime64_t tmp;
 
        p->utime = cputime_add(p->utime, cputime);
+       account_group_user_time(p, cputime);
 
        /* Add user time to cpustat. */
        tmp = cputime_to_cputime64(cputime);
@@ -4109,6 +4113,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
        tmp = cputime_to_cputime64(cputime);
 
        p->utime = cputime_add(p->utime, cputime);
+       account_group_user_time(p, cputime);
        p->gtime = cputime_add(p->gtime, cputime);
 
        cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4144,6 +4149,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
        }
 
        p->stime = cputime_add(p->stime, cputime);
+       account_group_system_time(p, cputime);
 
        /* Add system time to cpustat. */
        tmp = cputime_to_cputime64(cputime);
@@ -4185,6 +4191,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
        if (p == rq->idle) {
                p->stime = cputime_add(p->stime, steal);
+               account_group_system_time(p, steal);
                if (atomic_read(&rq->nr_iowait) > 0)
                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
                else
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 18fd17172eb66bb567ca4bcc47ca6c0cea923462..f604dae71316264445e63b4d09f26a483d61113e 100644 (file)
@@ -449,6 +449,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
                struct task_struct *curtask = task_of(curr);
 
                cpuacct_charge(curtask, delta_exec);
+               account_group_exec_runtime(curtask, delta_exec);
        }
 }
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index cdf5740ab03e8133c0a2b7713d6c77d2be1f07bf..b446dc87494fd681fd0a7d7e265402ac09936773 100644 (file)
@@ -526,6 +526,8 @@ static void update_curr_rt(struct rq *rq)
        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
        curr->se.sum_exec_runtime += delta_exec;
+       account_group_exec_runtime(curr, delta_exec);
+
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
 
@@ -1458,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
-                       p->it_sched_expires = p->se.sum_exec_runtime;
+                       p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
        }
 }
 
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43987e29b3b6a4775947d9a52fd22e8554c..b8c156979cf2ad600b0f9b59f9a935cec86aab85 100644 (file)
@@ -270,3 +270,89 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 #define sched_info_switch(t, next)             do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
+/*
+ * The following are functions that support scheduler-internal time accounting.
+ * These functions are generally called at the timer tick.  None of this depends
+ * on CONFIG_SCHEDSTATS.
+ */
+
+/**
+ * account_group_user_time - Maintain utime for a thread group.
+ *
+ * @tsk:       Pointer to task structure.
+ * @cputime:   Time value by which to increment the utime field of the
+ *             thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the utime field there.
+ */
+static inline void account_group_user_time(struct task_struct *tsk,
+                                          cputime_t cputime)
+{
+       struct signal_struct *sig;
+
+       sig = tsk->signal;
+       if (unlikely(!sig))
+               return;
+       if (sig->cputime.totals) {
+               struct task_cputime *times;
+
+               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+               times->utime = cputime_add(times->utime, cputime);
+               put_cpu_no_resched();
+       }
+}
+
+/**
+ * account_group_system_time - Maintain stime for a thread group.
+ *
+ * @tsk:       Pointer to task structure.
+ * @cputime:   Time value by which to increment the stime field of the
+ *             thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the stime field there.
+ */
+static inline void account_group_system_time(struct task_struct *tsk,
+                                            cputime_t cputime)
+{
+       struct signal_struct *sig;
+
+       sig = tsk->signal;
+       if (unlikely(!sig))
+               return;
+       if (sig->cputime.totals) {
+               struct task_cputime *times;
+
+               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+               times->stime = cputime_add(times->stime, cputime);
+               put_cpu_no_resched();
+       }
+}
+
+/**
+ * account_group_exec_runtime - Maintain exec runtime for a thread group.
+ *
+ * @tsk:       Pointer to task structure.
+ * @ns:                Time value by which to increment the sum_exec_runtime field
+ *             of the thread_group_cputime structure.
+ *
+ * If thread group time is being maintained, get the structure for the
+ * running CPU and update the sum_exec_runtime field there.
+ */
+static inline void account_group_exec_runtime(struct task_struct *tsk,
+                                             unsigned long long ns)
+{
+       struct signal_struct *sig;
+
+       sig = tsk->signal;
+       if (unlikely(!sig))
+               return;
+       if (sig->cputime.totals) {
+               struct task_cputime *times;
+
+               times = per_cpu_ptr(sig->cputime.totals, get_cpu());
+               times->sum_exec_runtime += ns;
+               put_cpu_no_resched();
+       }
+}
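
The account_group_*() helpers above each bump one field of a per-CPU
task_cputime slot from the timer tick; summing those slots on the read
side is assumed to be the job of thread_group_cputime(), which is not part
of this diff.  A user-space analogue of the pattern, with a plain array
standing in for the per-CPU data (illustration only, invented names):

#include <stdio.h>

#define NR_CPUS 4

struct task_cputime {
	unsigned long utime;
	unsigned long stime;
	unsigned long sum_exec_runtime;
};

/* stand-in for the per-CPU totals; each CPU only ever writes its own slot */
static struct task_cputime totals[NR_CPUS];

static void account_user(int cpu, unsigned long ticks)
{
	totals[cpu].utime += ticks;
}

/* read side: fold all CPUs into one task_cputime */
static struct task_cputime read_group_time(void)
{
	struct task_cputime sum = { 0, 0, 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		sum.utime += totals[cpu].utime;
		sum.stime += totals[cpu].stime;
		sum.sum_exec_runtime += totals[cpu].sum_exec_runtime;
	}
	return sum;
}

int main(void)
{
	account_user(0, 3);
	account_user(2, 5);
	printf("group utime = %lu ticks\n", read_group_time().utime);
	return 0;
}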
diff --git a/kernel/signal.c b/kernel/signal.c
index e661b01d340f06a17afb6cfef13a712ae0338ff9..6eea5826d61820e12475f0eb96c9626b4f6e1ca3 100644 (file)
@@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
        struct siginfo info;
        unsigned long flags;
        struct sighand_struct *psig;
+       struct task_cputime cputime;
        int ret = sig;
 
        BUG_ON(sig == -1);
@@ -1368,10 +1369,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
 
        info.si_uid = tsk->uid;
 
-       info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
-                                                      tsk->signal->utime));
-       info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
-                                                      tsk->signal->stime));
+       thread_group_cputime(tsk, &cputime);
+       info.si_utime = cputime_to_jiffies(cputime.utime);
+       info.si_stime = cputime_to_jiffies(cputime.stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 83ba21a13bd470cea2815d6792e8ff24af43e727..7110daeb9a90b2b585d97cf0fda94b7eb0b1869f 100644 (file)
@@ -267,16 +267,12 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-#ifdef CONFIG_NO_HZ
        int cpu = smp_processor_id();
+
        if (idle_cpu(cpu) && !in_interrupt())
-               tick_nohz_stop_idle(cpu);
-#endif
+               tick_check_idle(cpu);
+
        __irq_enter();
-#ifdef CONFIG_NO_HZ
-       if (idle_cpu(cpu))
-               tick_nohz_update_jiffies();
-#endif
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/kernel/sys.c b/kernel/sys.c
index 0bc8fa3c2288110b49fad4e9eaab2326f52c69f7..53879cdae483b6371543bdeb94818292e91f07bb 100644 (file)
@@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
        return old_fsgid;
 }
 
+void do_sys_times(struct tms *tms)
+{
+       struct task_cputime cputime;
+       cputime_t cutime, cstime;
+
+       spin_lock_irq(&current->sighand->siglock);
+       thread_group_cputime(current, &cputime);
+       cutime = current->signal->cutime;
+       cstime = current->signal->cstime;
+       spin_unlock_irq(&current->sighand->siglock);
+       tms->tms_utime = cputime_to_clock_t(cputime.utime);
+       tms->tms_stime = cputime_to_clock_t(cputime.stime);
+       tms->tms_cutime = cputime_to_clock_t(cutime);
+       tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
 asmlinkage long sys_times(struct tms __user * tbuf)
 {
-       /*
-        *      In the SMP world we might just be unlucky and have one of
-        *      the times increment as we use it. Since the value is an
-        *      atomically safe type this is just fine. Conceptually its
-        *      as if the syscall took an instant longer to occur.
-        */
        if (tbuf) {
                struct tms tmp;
-               struct task_struct *tsk = current;
-               struct task_struct *t;
-               cputime_t utime, stime, cutime, cstime;
-
-               spin_lock_irq(&tsk->sighand->siglock);
-               utime = tsk->signal->utime;
-               stime = tsk->signal->stime;
-               t = tsk;
-               do {
-                       utime = cputime_add(utime, t->utime);
-                       stime = cputime_add(stime, t->stime);
-                       t = next_thread(t);
-               } while (t != tsk);
-
-               cutime = tsk->signal->cutime;
-               cstime = tsk->signal->cstime;
-               spin_unlock_irq(&tsk->sighand->siglock);
-
-               tmp.tms_utime = cputime_to_clock_t(utime);
-               tmp.tms_stime = cputime_to_clock_t(stime);
-               tmp.tms_cutime = cputime_to_clock_t(cutime);
-               tmp.tms_cstime = cputime_to_clock_t(cstime);
+
+               do_sys_times(&tmp);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
@@ -1449,7 +1439,6 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
        struct rlimit new_rlim, *old_rlim;
-       unsigned long it_prof_secs;
        int retval;
 
        if (resource >= RLIM_NLIMITS)
@@ -1503,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
        if (new_rlim.rlim_cur == RLIM_INFINITY)
                goto out;
 
-       it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
-       if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
-               unsigned long rlim_cur = new_rlim.rlim_cur;
-               cputime_t cputime;
-
-               cputime = secs_to_cputime(rlim_cur);
-               read_lock(&tasklist_lock);
-               spin_lock_irq(&current->sighand->siglock);
-               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-               spin_unlock_irq(&current->sighand->siglock);
-               read_unlock(&tasklist_lock);
-       }
+       update_rlimit_cpu(new_rlim.rlim_cur);
 out:
        return 0;
 }
@@ -1552,11 +1530,8 @@ out:
  *
  */
 
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
-                                    cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 {
-       *utimep = cputime_add(*utimep, t->utime);
-       *stimep = cputime_add(*stimep, t->stime);
        r->ru_nvcsw += t->nvcsw;
        r->ru_nivcsw += t->nivcsw;
        r->ru_minflt += t->min_flt;
@@ -1570,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;
+       struct task_cputime cputime;
 
        memset((char *) r, 0, sizeof *r);
        utime = stime = cputime_zero;
 
        if (who == RUSAGE_THREAD) {
-               accumulate_thread_rusage(p, r, &utime, &stime);
+               accumulate_thread_rusage(p, r);
                goto out;
        }
 
@@ -1598,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                                break;
 
                case RUSAGE_SELF:
-                       utime = cputime_add(utime, p->signal->utime);
-                       stime = cputime_add(stime, p->signal->stime);
+                       thread_group_cputime(p, &cputime);
+                       utime = cputime_add(utime, cputime.utime);
+                       stime = cputime_add(stime, cputime.stime);
                        r->ru_nvcsw += p->signal->nvcsw;
                        r->ru_nivcsw += p->signal->nivcsw;
                        r->ru_minflt += p->signal->min_flt;
@@ -1608,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                        r->ru_oublock += p->signal->oublock;
                        t = p;
                        do {
-                               accumulate_thread_rusage(t, r, &utime, &stime);
+                               accumulate_thread_rusage(t, r);
                                t = next_thread(t);
                        } while (t != p);
                        break;
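
With do_sys_times() now built on thread_group_cputime(), times(2) still
reports user and system time summed over every thread of the caller.  A
minimal user-space check:

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	clock_t since_boot = times(&t);
	long hz = sysconf(_SC_CLK_TCK);

	if (since_boot == (clock_t)-1) {
		perror("times");
		return 1;
	}
	printf("utime=%ld stime=%ld (ticks of %ld Hz)\n",
	       (long)t.tms_utime, (long)t.tms_stime, hz);
	return 0;
}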
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993b73fde0d575a4e29b06db88935942..9ed2eec97526546e26c4fb640702a8907b3530a9 100644 (file)
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
        unsigned long flags;
        int ret;
 
+       /* save mult_orig on registration */
+       c->mult_orig = c->mult;
+
        spin_lock_irqsave(&clocksource_lock, flags);
        ret = clocksource_enqueue(c);
        if (!ret)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb8875b54f19e1c8062371c4b362cc5773..1ca99557e929261da5825325dafba49edde8be12 100644 (file)
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
        .read           = jiffies_read,
        .mask           = 0xffffffff, /*32bits*/
        .mult           = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+       .mult_orig      = NSEC_PER_JIFFY << JIFFIES_SHIFT,
        .shift          = JIFFIES_SHIFT,
 };
 
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e76cd8994403b1c1ca72c14ec3553b..1a20715bfd6e4854e96e96eb1541f792767aeaa5 100644 (file)
 
 #include <linux/mm.h>
 #include <linux/time.h>
-#include <linux/timer.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
 #include <linux/math64.h>
 #include <linux/clocksource.h>
+#include <linux/workqueue.h>
 #include <asm/timex.h>
 
 /*
@@ -218,11 +218,11 @@ void second_overflow(void)
 /* Disable the cmos update - used by virtualization and embedded */
 int no_sync_cmos_clock  __read_mostly;
 
-static void sync_cmos_clock(unsigned long dummy);
+static void sync_cmos_clock(struct work_struct *work);
 
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
+static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
-static void sync_cmos_clock(unsigned long dummy)
+static void sync_cmos_clock(struct work_struct *work)
 {
        struct timespec now, next;
        int fail = 1;
@@ -258,13 +258,13 @@ static void sync_cmos_clock(unsigned long dummy)
                next.tv_sec++;
                next.tv_nsec -= NSEC_PER_SEC;
        }
-       mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
+       schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 static void notify_cmos_timer(void)
 {
        if (!no_sync_cmos_clock)
-               mod_timer(&sync_cmos_timer, jiffies + 1);
+               schedule_delayed_work(&sync_cmos_work, 0);
 }
 
 #else
@@ -277,38 +277,50 @@ static inline void notify_cmos_timer(void) { }
 int do_adjtimex(struct timex *txc)
 {
        struct timespec ts;
-       long save_adjust, sec;
        int result;
 
-       /* In order to modify anything, you gotta be super-user! */
-       if (txc->modes && !capable(CAP_SYS_TIME))
-               return -EPERM;
-
-       /* Now we validate the data before disabling interrupts */
-
-       if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
+       /* Validate the data before disabling interrupts */
+       if (txc->modes & ADJ_ADJTIME) {
                /* singleshot must not be used with any other mode bits */
-               if (txc->modes & ~ADJ_OFFSET_SS_READ)
+               if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
                        return -EINVAL;
+               if (!(txc->modes & ADJ_OFFSET_READONLY) &&
+                   !capable(CAP_SYS_TIME))
+                       return -EPERM;
+       } else {
+               /* In order to modify anything, you gotta be super-user! */
+                if (txc->modes && !capable(CAP_SYS_TIME))
+                       return -EPERM;
+
+               /* if the quartz is off by more than 10% something is VERY wrong! */
+               if (txc->modes & ADJ_TICK &&
+                   (txc->tick <  900000/USER_HZ ||
+                    txc->tick > 1100000/USER_HZ))
+                               return -EINVAL;
+
+               if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+                       hrtimer_cancel(&leap_timer);
        }
 
-       /* if the quartz is off by more than 10% something is VERY wrong ! */
-       if (txc->modes & ADJ_TICK)
-               if (txc->tick <  900000/USER_HZ ||
-                   txc->tick > 1100000/USER_HZ)
-                       return -EINVAL;
-
-       if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
-               hrtimer_cancel(&leap_timer);
        getnstimeofday(&ts);
 
        write_seqlock_irq(&xtime_lock);
 
-       /* Save for later - semantics of adjtime is to return old value */
-       save_adjust = time_adjust;
-
        /* If there are input parameters, then process them */
+       if (txc->modes & ADJ_ADJTIME) {
+               long save_adjust = time_adjust;
+
+               if (!(txc->modes & ADJ_OFFSET_READONLY)) {
+                       /* adjtime() is independent from ntp_adjtime() */
+                       time_adjust = txc->offset;
+                       ntp_update_frequency();
+               }
+               txc->offset = save_adjust;
+               goto adj_done;
+       }
        if (txc->modes) {
+               long sec;
+
                if (txc->modes & ADJ_STATUS) {
                        if ((time_status & STA_PLL) &&
                            !(txc->status & STA_PLL)) {
@@ -375,13 +387,8 @@ int do_adjtimex(struct timex *txc)
                if (txc->modes & ADJ_TAI && txc->constant > 0)
                        time_tai = txc->constant;
 
-               if (txc->modes & ADJ_OFFSET) {
-                       if (txc->modes == ADJ_OFFSET_SINGLESHOT)
-                               /* adjtime() is independent from ntp_adjtime() */
-                               time_adjust = txc->offset;
-                       else
-                               ntp_update_offset(txc->offset);
-               }
+               if (txc->modes & ADJ_OFFSET)
+                       ntp_update_offset(txc->offset);
                if (txc->modes & ADJ_TICK)
                        tick_usec = txc->tick;
 
@@ -389,22 +396,18 @@ int do_adjtimex(struct timex *txc)
                        ntp_update_frequency();
        }
 
+       txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+                                 NTP_SCALE_SHIFT);
+       if (!(time_status & STA_NANO))
+               txc->offset /= NSEC_PER_USEC;
+
+adj_done:
        result = time_state;    /* mostly `TIME_OK' */
        if (time_status & (STA_UNSYNC|STA_CLOCKERR))
                result = TIME_ERROR;
 
-       if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-           (txc->modes == ADJ_OFFSET_SS_READ))
-               txc->offset = save_adjust;
-       else {
-               txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-                                         NTP_SCALE_SHIFT);
-               if (!(time_status & STA_NANO))
-                       txc->offset /= NSEC_PER_USEC;
-       }
-       txc->freq          = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
-                                        (s64)PPM_SCALE_INV,
-                                        NTP_SCALE_SHIFT);
+       txc->freq          = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
+                                        (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->maxerror      = time_maxerror;
        txc->esterror      = time_esterror;
        txc->status        = time_status;
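
The ADJ_ADJTIME branch above keeps adjtime() offsets separate from
ntp_adjtime() state and lets the singleshot offset be read without
CAP_SYS_TIME.  A user-space sketch of that read-only query; the fallback
define mirrors the ADJ_OFFSET_SS_READ value from linux/timex.h in case the
libc headers predate it:

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_OFFSET_SS_READ
#define ADJ_OFFSET_SS_READ 0xa001	/* ADJ_OFFSET_SINGLESHOT | read-only */
#endif

int main(void)
{
	struct timex tx = { .modes = ADJ_OFFSET_SS_READ };
	int state = adjtimex(&tx);

	if (state < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("remaining adjtime() offset: %ld us, clock state: %d\n",
	       (long)tx.offset, state);
	return 0;
}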
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8f919b2752cfb7d97b4dc9ea4e87834a97..f98a1b7b16e942018ffe9e5998756405695da9a4 100644 (file)
@@ -383,6 +383,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
        return 0;
 }
 
+/*
+ * Called from irq_enter() when idle was interrupted to reenable the
+ * per cpu device.
+ */
+void tick_check_oneshot_broadcast(int cpu)
+{
+       if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
+               struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+
+               clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
+       }
+}
+
 /*
  * Handle oneshot mode broadcasting
  */
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 469248782c2355c0c2d31cf731dc8c692023268e..b1c05bf75ee0ce36e0664d3e2e71d00ac0e09edd 100644 (file)
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
+extern void tick_check_oneshot_broadcast(int cpu);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline void tick_check_oneshot_broadcast(int cpu) { }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index b711ffcb106c906be19eab01140ec1e8cafd0063..0581c11fe6c6765121a84f531475e0e3c78809b3 100644 (file)
@@ -155,7 +155,7 @@ void tick_nohz_update_jiffies(void)
        touch_softlockup_watchdog();
 }
 
-void tick_nohz_stop_idle(int cpu)
+static void tick_nohz_stop_idle(int cpu)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
@@ -377,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
        return ts->sleep_length;
 }
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+       hrtimer_cancel(&ts->sched_timer);
+       ts->sched_timer.expires = ts->idle_tick;
+
+       while (1) {
+               /* Forward the time to expire in the future */
+               hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+                       hrtimer_start(&ts->sched_timer,
+                                     ts->sched_timer.expires,
+                                     HRTIMER_MODE_ABS);
+                       /* Check, if the timer was already in the past */
+                       if (hrtimer_active(&ts->sched_timer))
+                               break;
+               } else {
+                       if (!tick_program_event(ts->sched_timer.expires, 0))
+                               break;
+               }
+               /* Update jiffies and reread time */
+               tick_do_update_jiffies64(now);
+               now = ktime_get();
+       }
+}
+
 /**
  * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
  *
@@ -430,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
         */
        ts->tick_stopped  = 0;
        ts->idle_exittime = now;
-       hrtimer_cancel(&ts->sched_timer);
-       ts->sched_timer.expires = ts->idle_tick;
-
-       while (1) {
-               /* Forward the time to expire in the future */
-               hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start(&ts->sched_timer,
-                                     ts->sched_timer.expires,
-                                     HRTIMER_MODE_ABS);
-                       /* Check, if the timer was already in the past */
-                       if (hrtimer_active(&ts->sched_timer))
-                               break;
-               } else {
-                       if (!tick_program_event(ts->sched_timer.expires, 0))
-                               break;
-               }
-               /* Update jiffies and reread time */
-               tick_do_update_jiffies64(now);
-               now = ktime_get();
-       }
+       tick_nohz_restart(ts, now);
        local_irq_enable();
 }
 
@@ -503,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
 
-       /* Do not restart, when we are in the idle loop */
-       if (ts->tick_stopped)
-               return;
-
        while (tick_nohz_reprogram(ts, now)) {
                now = ktime_get();
                tick_do_update_jiffies64(now);
@@ -552,12 +553,46 @@ static void tick_nohz_switch_to_nohz(void)
               smp_processor_id());
 }
 
+/*
+ * When NOHZ is enabled and the tick is stopped, we need to kick the
+ * tick timer from irq_enter() so that the jiffies update is kept
+ * alive during long running softirqs. That's ugly as hell, but
+ * correctness is key even if we need to fix the offending softirq in
+ * the first place.
+ *
+ * Note, this is different to tick_nohz_restart. We just kick the
+ * timer and do not touch the other magic bits which need to be done
+ * when idle is left.
+ */
+static void tick_nohz_kick_tick(int cpu)
+{
+       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+       if (!ts->tick_stopped)
+               return;
+
+       tick_nohz_restart(ts, ktime_get());
+}
+
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
 
 #endif /* NO_HZ */
 
+/*
+ * Called from irq_enter to notify about the possible interruption of idle()
+ */
+void tick_check_idle(int cpu)
+{
+       tick_check_oneshot_broadcast(cpu);
+#ifdef CONFIG_NO_HZ
+       tick_nohz_stop_idle(cpu);
+       tick_nohz_update_jiffies();
+       tick_nohz_kick_tick(cpu);
+#endif
+}
+
 /*
  * High resolution timer specific code
  */
@@ -611,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
                profile_tick(CPU_PROFILING);
        }
 
-       /* Do not restart, when we are in the idle loop */
-       if (ts->tick_stopped)
-               return HRTIMER_NORESTART;
-
        hrtimer_forward(timer, now, tick_period);
 
        return HRTIMER_RESTART;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c900d7739c0dc2f27b81c480cdb55c..e7acfb482a680ea248f5268fbb7f158868938e56 100644 (file)
@@ -58,27 +58,26 @@ struct clocksource *clock;
 
 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
  *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
  */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
        cycle_t cycle_now, cycle_delta;
-       s64 ns_offset;
+       s64 nsec;
 
-       /* read clocksource: */
        cycle_now = clocksource_read(clock);
-
-       /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+       clock->cycle_last = cycle_now;
 
-       /* convert to nanoseconds: */
-       ns_offset = cyc2ns(clock, cycle_delta);
+       nsec = cyc2ns(clock, cycle_delta);
+       timespec_add_ns(&xtime, nsec);
 
-       return ns_offset;
+       nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+       clock->raw_time.tv_nsec += nsec;
 }
 
 /**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
  */
 void getnstimeofday(struct timespec *ts)
 {
+       cycle_t cycle_now, cycle_delta;
        unsigned long seq;
        s64 nsecs;
 
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
                seq = read_seqbegin(&xtime_lock);
 
                *ts = xtime;
-               nsecs = __get_nsec_offset();
+
+               /* read clocksource: */
+               cycle_now = clocksource_read(clock);
+
+               /* calculate the delta since the last update_wall_time: */
+               cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+               /* convert to nanoseconds: */
+               nsecs = cyc2ns(clock, cycle_delta);
 
        } while (read_seqretry(&xtime_lock, seq));
 
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
  */
 int do_settimeofday(struct timespec *tv)
 {
+       struct timespec ts_delta;
        unsigned long flags;
-       time_t wtm_sec, sec = tv->tv_sec;
-       long wtm_nsec, nsec = tv->tv_nsec;
 
        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;
 
        write_seqlock_irqsave(&xtime_lock, flags);
 
-       nsec -= __get_nsec_offset();
+       clocksource_forward_now();
+
+       ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+       ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+       wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
 
-       wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-       wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+       xtime = *tv;
 
-       set_normalized_timespec(&xtime, sec, nsec);
-       set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
        update_xtime_cache(0);
 
        clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
        struct clocksource *new;
-       cycle_t now;
-       u64 nsec;
 
        new = clocksource_get_next();
 
        if (clock == new)
                return;
 
-       new->cycle_last = 0;
-       now = clocksource_read(new);
-       nsec =  __get_nsec_offset();
-       timespec_add_ns(&xtime, nsec);
+       clocksource_forward_now();
 
-       clock = new;
-       clock->cycle_last = now;
+       new->raw_time = clock->raw_time;
 
+       clock = new;
+       clock->cycle_last = 0;
+       clock->cycle_last = clocksource_read(new);
        clock->error = 0;
        clock->xtime_nsec = 0;
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,10 +205,43 @@ static void change_clocksource(void)
         */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif
 
+/**
+ * getrawmonotonic - Returns the raw monotonic time in a timespec
+ * @ts:                pointer to the timespec to be set
+ *
+ * Returns the raw monotonic time (completely un-modified by ntp)
+ */
+void getrawmonotonic(struct timespec *ts)
+{
+       unsigned long seq;
+       s64 nsecs;
+       cycle_t cycle_now, cycle_delta;
+
+       do {
+               seq = read_seqbegin(&xtime_lock);
+
+               /* read clocksource: */
+               cycle_now = clocksource_read(clock);
+
+               /* calculate the delta since the last update_wall_time: */
+               cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+               /* convert to nanoseconds: */
+               nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
+
+               *ts = clock->raw_time;
+
+       } while (read_seqretry(&xtime_lock, seq));
+
+       timespec_add_ns(ts, nsecs);
+}
+EXPORT_SYMBOL(getrawmonotonic);
+
+
 /**
  * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  */
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
 static int timekeeping_suspended;
 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;
 
 /**
  * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
                wall_to_monotonic.tv_sec -= sleep_length;
                total_sleep_time += sleep_length;
        }
-       /* Make sure that we have the correct xtime reference */
-       timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
        update_xtime_cache(0);
        /* re-base the last cycle value */
        clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
        timekeeping_suspend_time = read_persistent_clock();
 
        write_seqlock_irqsave(&xtime_lock, flags);
-       /* Get the current xtime offset */
-       timekeeping_suspend_nsecs = __get_nsec_offset();
+       clocksource_forward_now();
        timekeeping_suspended = 1;
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -454,23 +487,29 @@ void update_wall_time(void)
 #else
        offset = clock->cycle_interval;
 #endif
-       clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+       clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
 
        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= clock->cycle_interval) {
                /* accumulate one interval */
-               clock->xtime_nsec += clock->xtime_interval;
-               clock->cycle_last += clock->cycle_interval;
                offset -= clock->cycle_interval;
+               clock->cycle_last += clock->cycle_interval;
 
+               clock->xtime_nsec += clock->xtime_interval;
                if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
                        clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
                        xtime.tv_sec++;
                        second_overflow();
                }
 
+               clock->raw_time.tv_nsec += clock->raw_interval;
+               if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
+                       clock->raw_time.tv_nsec -= NSEC_PER_SEC;
+                       clock->raw_time.tv_sec++;
+               }
+
                /* accumulate error between NTP and clock interval */
                clock->error += tick_length;
                clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
        /* correct the clock when NTP error is too big */
        clocksource_adjust(offset);
 
-       /* store full nanoseconds into xtime */
-       xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
+       /* store full nanoseconds into xtime after rounding it up and
+        * add the remainder to the error difference.
+        */
+       xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
        clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+       clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
 
        update_xtime_cache(cyc2ns(clock, offset));
 
index a40e20fd00010d000a3dfc2406888b2f81146db6..f6426911e35a6da3a711e8b8e2cb602a5057a2dc 100644 (file)
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
 }
 
 static void
-print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now)
+print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
+           int idx, u64 now)
 {
 #ifdef CONFIG_TIMER_STATS
        char tmp[TASK_COMM_LEN + 1];
 #endif
        SEQ_printf(m, " #%d: ", idx);
-       print_name_offset(m, timer);
+       print_name_offset(m, taddr);
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->function);
        SEQ_printf(m, ", S:%02lx", timer->state);
@@ -99,7 +100,7 @@ next_one:
                tmp = *timer;
                spin_unlock_irqrestore(&base->cpu_base->lock, flags);
 
-               print_timer(m, &tmp, i, now);
+               print_timer(m, timer, &tmp, i, now);
                next++;
                goto next_one;
        }
@@ -109,6 +110,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
+       SEQ_printf(m, "  .base:       %p\n", base);
        SEQ_printf(m, "  .index:      %d\n",
                        base->index);
        SEQ_printf(m, "  .resolution: %Lu nsecs\n",
@@ -183,12 +185,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 static void
-print_tickdevice(struct seq_file *m, struct tick_device *td)
+print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 {
        struct clock_event_device *dev = td->evtdev;
 
        SEQ_printf(m, "\n");
        SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+       if (cpu < 0)
+               SEQ_printf(m, "Broadcast device\n");
+       else
+               SEQ_printf(m, "Per CPU device: %d\n", cpu);
 
        SEQ_printf(m, "Clock Event Device: ");
        if (!dev) {
@@ -222,7 +228,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
        int cpu;
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-       print_tickdevice(m, tick_get_broadcast_device());
+       print_tickdevice(m, tick_get_broadcast_device(), -1);
        SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
                   tick_get_broadcast_mask()->bits[0]);
 #ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +238,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
        SEQ_printf(m, "\n");
 #endif
        for_each_online_cpu(cpu)
-                  print_tickdevice(m, tick_get_device(cpu));
+               print_tickdevice(m, tick_get_device(cpu), cpu);
        SEQ_printf(m, "\n");
 }
 #else
@@ -244,7 +250,7 @@ static int timer_list_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Timer List Version: v0.3\n");
+       SEQ_printf(m, "Timer List Version: v0.4\n");
        SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
        SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
index 510fe69351ca2ec19700802b59ac3dbe9d142019..56becf373c589ba90c36a5b6e23df0527b227663 100644 (file)
@@ -1436,9 +1436,11 @@ static void __cpuinit migrate_timers(int cpu)
        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);
-
-       local_irq_disable();
-       spin_lock(&new_base->lock);
+       /*
+        * The caller is globally serialized and nobody else
+        * takes two locks at once, deadlock is not possible.
+        */
+       spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        BUG_ON(old_base->running_timer);
@@ -1453,8 +1455,7 @@ static void __cpuinit migrate_timers(int cpu)
        }
 
        spin_unlock(&old_base->lock);
-       spin_unlock(&new_base->lock);
-       local_irq_enable();
+       spin_unlock_irq(&new_base->lock);
        put_cpu_var(tvec_bases);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
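
In migrate_timers() the open-coded local_irq_disable()/spin_lock() pair becomes a single spin_lock_irq(), and the second base lock is still taken with spin_lock_nested(..., SINGLE_DEPTH_NESTING) so lockdep accepts acquiring two locks of the same class. A minimal sketch of that double-lock pattern with hypothetical structures; as the new comment notes, it is only deadlock-free because callers are globally serialized:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct tbase {
	spinlock_t lock;
	/* ... per-CPU timer lists ... */
};

/* Hypothetical sketch: callers must be globally serialized, otherwise two
 * CPUs could take the pair of locks in opposite order and deadlock.
 */
static void move_all(struct tbase *new_base, struct tbase *old_base)
{
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	/* ... detach entries from old_base and re-add them to new_base ... */

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
}
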
index 576e511990794eacfad7ad525722c4fa077ae852..3e3fde7c1d2bf2a48af6d19d72e47f3adc95514b 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/string.h>
 #include <linux/selinux.h>
 #include <linux/mutex.h>
+#include <linux/posix-timers.h>
 
 #include "avc.h"
 #include "objsec.h"
@@ -2322,13 +2323,7 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm)
                        initrlim = init_task.signal->rlim+i;
                        rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
                }
-               if (current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-                       /*
-                        * This will cause RLIMIT_CPU calculations
-                        * to be refigured.
-                        */
-                       current->it_prof_expires = jiffies_to_cputime(1);
-               }
+               update_rlimit_cpu(rlim->rlim_cur);
        }
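
The SELinux hunk replaces the direct write to current->it_prof_expires with update_rlimit_cpu(), the posix-cpu-timers helper this series introduces (hence the added <linux/posix-timers.h> include), which re-arms the process-wide CPU-time expiry when RLIMIT_CPU changes. A hedged sketch of how another caller might use it; the helper name and the omitted locking are simplifying assumptions:

#include <linux/posix-timers.h>
#include <linux/resource.h>
#include <linux/sched.h>

/* Hypothetical helper: tighten RLIMIT_CPU for the current process and
 * re-arm the per-process CPU-time expiry, as the hook above now does.
 * Real callers would hold the appropriate locks around the rlim update.
 */
static void tighten_cpu_limit(unsigned long new_secs)
{
	struct rlimit *rlim = &current->signal->rlim[RLIMIT_CPU];

	if (new_secs < rlim->rlim_cur) {
		rlim->rlim_cur = new_secs;
		update_rlimit_cpu(rlim->rlim_cur);
	}
}
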
 
        /* Wake up the parent if it is waiting so that it can