Merge branch 'hwpoison-2.6.32' of git://git.kernel.org/pub/scm/linux/kernel/git/ak...
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index b588fd81f7f996792852e5035278736cdf9bec46..20c5f92e28a8864cc3ca188cc1205200a89b6c55 100644
@@ -66,10 +66,14 @@ u64 notrace trace_clock(void)
  * Used by plugins that need globally coherent timestamps.
  */
 
-static u64 prev_trace_clock_time;
-
-static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp =
-       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+/* keep prev_time and lock in the same cacheline. */
+static struct {
+       u64 prev_time;
+       raw_spinlock_t lock;
+} trace_clock_struct ____cacheline_aligned_in_smp =
+       {
+               .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+       };
 
 u64 notrace trace_clock_global(void)
 {
@@ -88,19 +92,19 @@ u64 notrace trace_clock_global(void)
        if (unlikely(in_nmi()))
                goto out;
 
-       __raw_spin_lock(&trace_clock_lock);
+       __raw_spin_lock(&trace_clock_struct.lock);
 
        /*
         * TODO: if this happens often then maybe we should reset
-        * my_scd->clock to prev_trace_clock_time+1, to make sure
+        * my_scd->clock to prev_time+1, to make sure
         * we start ticking with the local clock from now on?
         */
-       if ((s64)(now - prev_trace_clock_time) < 0)
-               now = prev_trace_clock_time + 1;
+       if ((s64)(now - trace_clock_struct.prev_time) < 0)
+               now = trace_clock_struct.prev_time + 1;
 
-       prev_trace_clock_time = now;
+       trace_clock_struct.prev_time = now;
 
-       __raw_spin_unlock(&trace_clock_lock);
+       __raw_spin_unlock(&trace_clock_struct.lock);
 
  out:
        raw_local_irq_restore(flags);
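
The change above folds prev_trace_clock_time and trace_clock_lock into one ____cacheline_aligned_in_smp struct, so the global timestamp and the lock that guards it share a single cache line, and trace_clock_global() is rewritten to go through trace_clock_struct. Below is a minimal userspace sketch of the same two ideas, not the kernel code itself: it assumes a 64-byte cache line and stands in pthread spinlocks and clock_gettime() for the kernel's raw spinlock and per-CPU sched clock; the names clock_struct, local_clock_ns() and global_clock() are made up for the example.

#define _POSIX_C_SOURCE 200809L

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define CACHELINE 64	/* assumption: 64-byte cache lines */

/* keep prev_time and its lock in the same cache line, as in the patch */
static struct {
	uint64_t prev_time;		/* last timestamp handed out */
	pthread_spinlock_t lock;	/* protects prev_time */
} __attribute__((aligned(CACHELINE))) clock_struct;

/* raw local clock; stands in for the per-CPU sched clock */
static uint64_t local_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* globally monotonic timestamp, same shape as trace_clock_global() */
static uint64_t global_clock(void)
{
	uint64_t now = local_clock_ns();

	pthread_spin_lock(&clock_struct.lock);

	/* if the local clock went backwards, tick just past prev_time */
	if ((int64_t)(now - clock_struct.prev_time) < 0)
		now = clock_struct.prev_time + 1;

	clock_struct.prev_time = now;

	pthread_spin_unlock(&clock_struct.lock);

	return now;
}

int main(void)
{
	pthread_spin_init(&clock_struct.lock, PTHREAD_PROCESS_PRIVATE);
	printf("t0=%llu t1=%llu\n",
	       (unsigned long long)global_clock(),
	       (unsigned long long)global_clock());
	return 0;
}

As in the patch, the signed (s64)/(int64_t) subtraction clamps any backwards step to prev_time + 1, so timestamps handed out under the lock never decrease, and keeping prev_time next to the lock means a contended update dirties only one cache line instead of two.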