PM / devfreq: rk3399_dmc: rename driver and internals to rockchip
[firefly-linux-kernel-4.4.55.git] / kernel / watchdog.c
index 18f34cf75f741e2a63db6dc6c522dfa2e03d1a54..b718530034d23b10415c5fb39f9c5018b8ef811f 100644 (file)
@@ -103,6 +103,11 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 static unsigned long soft_lockup_nmi_warn;
@@ -114,7 +119,7 @@ static unsigned long soft_lockup_nmi_warn;
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
 /*
  * We may not want to enable hard lockup detection by default in all cases,
  * for example when running the kernel as a guest on a hypervisor. In these
@@ -271,7 +276,7 @@ void touch_softlockup_watchdog_sync(void)
        __this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static bool is_hardlockup(void)
 {
@@ -285,6 +290,76 @@ static bool is_hardlockup(void)
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+/*
+ * Return the cpu after @cpu in the watchdog_cpus mask, wrapping around to
+ * the first set bit.  Returns nr_cpu_ids when @cpu is the only cpu in the
+ * mask, i.e. there is no other cpu for it to watch.
+ */
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+       /* local snapshot: watchdog_cpus may change concurrently on hotplug */
+       cpumask_t cpus = watchdog_cpus;
+       unsigned int next_cpu;
+
+       next_cpu = cpumask_next(cpu, &cpus);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(&cpus);
+
+       /* wrapped back to ourselves: no other cpu to check */
+       if (next_cpu == cpu)
+               return nr_cpu_ids;
+
+       return next_cpu;
+}
+
+/*
+ * Check whether @cpu's hrtimer has made progress since our previous check.
+ * Returns 1 (hard lockup suspected) when hrtimer_interrupts has not
+ * advanced, 0 otherwise.  Mirrors is_hardlockup(), but reads the per-cpu
+ * counters of a remote cpu instead of the local one.
+ */
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+       unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+       if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+               return 1;
+
+       /* progress seen: remember the new count for the next round */
+       per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+       return 0;
+}
+
+/*
+ * Timer-based hard lockup check, used when no NMI/perf-based detector is
+ * available: from this cpu's hrtimer callback, verify that the next cpu in
+ * watchdog_cpus is still taking hrtimer interrupts.  Each online cpu thus
+ * watches exactly one neighbour in a ring.
+ */
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+       unsigned int next_cpu;
+
+       /*
+        * Test for hardlockups every 3 samples.  The sample period is
+        *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+        *  watchdog_thresh (over by 20%).
+        */
+       if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+               return;
+
+       /* check for a hardlockup on the next cpu */
+       next_cpu = watchdog_next_cpu(smp_processor_id());
+       if (next_cpu >= nr_cpu_ids)
+               return;
+
+       /*
+        * Pairs with the smp_wmb() in the hotplug paths below so that the
+        * watchdog_nmi_touch flag set there is observed before (or with) the
+        * cpumask update that made next_cpu visible to us.
+        */
+       smp_rmb();
+
+       if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+               per_cpu(watchdog_nmi_touch, next_cpu) = false;
+               return;
+       }
+
+       if (is_hardlockup_other_cpu(next_cpu)) {
+               /* only warn once */
+               if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+                       return;
+
+               if (hardlockup_panic)
+                       panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+               else
+                       WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+               per_cpu(hard_watchdog_warn, next_cpu) = true;
+       } else {
+               per_cpu(hard_watchdog_warn, next_cpu) = false;
+       }
+}
+#else
+/* no hardlockup detection without NMI or a second cpu: no-op */
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
+
 static int is_softlockup(unsigned long touch_ts)
 {
        unsigned long now = get_timestamp();
@@ -297,7 +372,7 @@ static int is_softlockup(unsigned long touch_ts)
        return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
@@ -328,7 +403,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();
-               struct pt_regs *regs = get_irq_regs();
 
                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
@@ -360,7 +434,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
        __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -384,6 +458,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
+       /* test for hardlockups on the next cpu */
+       watchdog_check_hardlockup_other_cpu();
+
        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -561,7 +638,7 @@ static void watchdog(unsigned int cpu)
                watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -659,10 +736,45 @@ static void watchdog_nmi_disable(unsigned int cpu)
        }
 }
 
+#else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+/*
+ * OTHER_CPU variant: no perf event to create; joining the detector just
+ * means adding this cpu to the watchdog_cpus ring.  Always returns 0.
+ */
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+       /*
+        * The new cpu will be marked online before the first hrtimer interrupt
+        * runs on it.  If another cpu tests for a hardlockup on the new cpu
+        * before it has run its first hrtimer, it will get a false positive.
+        * Touch the watchdog on the new cpu to delay the first check for at
+        * least 3 sampling periods to guarantee one hrtimer has run on the new
+        * cpu.
+        */
+       per_cpu(watchdog_nmi_touch, cpu) = true;
+       /* publish the touch flag before the cpu becomes visible in the mask;
+        * pairs with smp_rmb() in watchdog_check_hardlockup_other_cpu() */
+       smp_wmb();
+       cpumask_set_cpu(cpu, &watchdog_cpus);
+       return 0;
+}
+
+/*
+ * OTHER_CPU variant: remove @cpu from the watchdog_cpus ring on hotplug
+ * offline, taking care not to trip a false positive on its neighbour.
+ */
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+       unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+       /*
+        * Offlining this cpu will cause the cpu before this one to start
+        * checking the one after this one.  If this cpu just finished checking
+        * the next cpu and updating hrtimer_interrupts_saved, and then the
+        * previous cpu checks it within one sample period, it will trigger a
+        * false positive.  Touch the watchdog on the next cpu to prevent it.
+        */
+       if (next_cpu < nr_cpu_ids)
+               per_cpu(watchdog_nmi_touch, next_cpu) = true;
+       /* publish the touch flag before removing ourselves from the mask;
+        * pairs with smp_rmb() in watchdog_check_hardlockup_other_cpu() */
+       smp_wmb();
+       cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
@@ -907,6 +1019,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
+               if (old == new)
+                       goto out;
+
                err = proc_watchdog_update();
        }
 out:
@@ -951,7 +1066,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
 int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int err, old;
+       int err, old, new;
 
        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
@@ -971,6 +1086,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
        /*
         * Update the sample period. Restore on failure.
         */
+       new = ACCESS_ONCE(watchdog_thresh);
+       if (old == new)
+               goto out;
+
        set_sample_period();
        err = proc_watchdog_update();
        if (err) {