Merge commit 'v2.6.29-rc1' into timers/hrtimers
author Ingo Molnar <mingo@elte.hu>
Mon, 12 Jan 2009 10:32:03 +0000 (11:32 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 12 Jan 2009 10:32:03 +0000 (11:32 +0100)
Conflicts:
kernel/time/tick-common.c

include/linux/clockchips.h
kernel/hrtimer.c
kernel/time/tick-common.c

diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index cea153697ec788a3c3d61c2402a3f1188b5b06c7..3a1dbba4d3ae2da500e710387d130cd8ad5dd4c2 100644
@@ -36,6 +36,7 @@ enum clock_event_nofitiers {
        CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
        CLOCK_EVT_NOTIFY_SUSPEND,
        CLOCK_EVT_NOTIFY_RESUME,
+       CLOCK_EVT_NOTIFY_CPU_DYING,
        CLOCK_EVT_NOTIFY_CPU_DEAD,
 };
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6b27f9343809260976a1df3e4678e6..77aa33bb877ce79756aeab18c7c1b3717ff2333d 100644
@@ -1156,6 +1156,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 retries, we consider that hrtimer_interrupt() is hanging,
+ * which can happen when something slows down the interrupt, such as
+ * tracing. We then force clock reprogramming for every future hrtimer
+ * interrupt, to avoid infinite loops, and overwrite the min_delta_ns
+ * threshold accordingly.
+ * The next tick event is scheduled at 3 times the duration we currently
+ * spend in hrtimer_interrupt(). The cpus then spend at most 1/4 of
+ * their time processing hrtimer interrupts, which is enough to let
+ * the system run without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+                       ktime_t try_time)
+{
+       force_clock_reprogram = 1;
+       dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+       printk(KERN_WARNING "hrtimer: interrupt too slow, "
+               "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1188,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        ktime_t expires_next, now;
+       int nr_retries = 0;
        int i;
 
        BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1196,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        dev->next_event.tv64 = KTIME_MAX;
 
  retry:
+       /* 5 retries is enough to notice a hang */
+       if (!(++nr_retries % 5))
+               hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
        now = ktime_get();
 
        expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1252,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
        /* Reprogramming necessary ? */
        if (expires_next.tv64 != KTIME_MAX) {
-               if (tick_program_event(expires_next, 0))
+               if (tick_program_event(expires_next, force_clock_reprogram))
                        goto retry;
        }
 }
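
The "1/4 of their time" figure in the comment above follows directly from the hang path's arithmetic (a rough bound, assuming each pass through hrtimer_interrupt() keeps taking about the measured try_time = T nanoseconds): min_delta_ns is forced to 3T, so consecutive tick events are at least 3T apart and interrupt processing consumes at most

	T / (T + 3T) = 1/4

of the available cpu time.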
@@ -1578,6 +1606,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+               break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09903907558d7d2aba661f5b426db6..21a5ca849514b40b689c5b34e2c9caaf9cbf051b 100644
@@ -273,6 +273,21 @@ out_bc:
        return ret;
 }
 
+/*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+       if (*cpup == tick_do_timer_cpu) {
+               int cpu = cpumask_first(cpu_online_mask);
+
+               tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+                       TICK_DO_TIMER_NONE;
+       }
+}
+
 /*
  * Shutdown an event device on a given cpu:
  *
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
                clockevents_exchange_device(dev, NULL);
                td->evtdev = NULL;
        }
-       /* Transfer the do_timer job away from this cpu */
-       if (*cpup == tick_do_timer_cpu) {
-               int cpu = cpumask_first(cpu_online_mask);
-
-               tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-                       TICK_DO_TIMER_NONE;
-       }
        spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
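As a worked example of the handover above (assuming, as in this kernel's hotplug sequence, that __cpu_disable() has already cleared the dying cpu from cpu_online_mask before the CPU_DYING notifiers run): if CPU 1 dies while owning the do_timer duty and CPUs 0 and 2 remain online, cpumask_first(cpu_online_mask) returns 0 and the duty moves to CPU 0. Were no cpu left online, cpumask_first() would return nr_cpu_ids and the duty would be parked as TICK_DO_TIMER_NONE.
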
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                tick_broadcast_oneshot_control(reason);
                break;
 
+       case CLOCK_EVT_NOTIFY_CPU_DYING:
+               tick_handover_do_timer(dev);
+               break;
+
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(dev);
                tick_shutdown_broadcast(dev);
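
Taken together, the hunks wire up the following sequence when a cpu goes offline (a simplified sketch of the call chain after this merge; the clockevents notifier glue between clockevents_notify() and tick_notify(), and the rest of the CPU_DEAD teardown, are elided):

	hrtimer_cpu_notify(CPU_DYING)                /* on the dying cpu, irqs off */
	    clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu)
	        tick_notify(..., CLOCK_EVT_NOTIFY_CPU_DYING, dev)
	            tick_handover_do_timer(dev)      /* reassign tick_do_timer_cpu */

	hrtimer_cpu_notify(CPU_DEAD)                 /* later, from a surviving cpu */
	    clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu)
	        tick_notify(..., CLOCK_EVT_NOTIFY_CPU_DEAD, dev)
	            tick_shutdown_broadcast_oneshot(dev)
	            tick_shutdown_broadcast(dev)
	            tick_shutdown(dev)               /* no longer does the handover */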