Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ab370ffffd53dc5d826137c796df5c32afe81abf..37e50aadd471195bebb3dc32c9ad026dbbcf2ab7 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;
 
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  */
 static void retrigger_next_event(void *arg)
 {
-       struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
        if (!hrtimer_hres_active())
                return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                 */
                debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
-               reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+               reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
                /*
                 * We must preserve the CALLBACK state flag here,
                 * otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                 * on dynticks target.
                 */
                wake_up_nohz_cpu(new_base->cpu_base->cpu);
-       } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+       } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
                        hrtimer_reprogram(timer, new_base)) {
                /*
                 * Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
  */
 ktime_t hrtimer_get_next_event(void)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
        memset(timer, 0, sizeof(struct hrtimer));
 
-       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+       cpu_base = raw_cpu_ptr(&hrtimer_bases);
 
        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
        struct hrtimer_cpu_base *cpu_base;
        int base = hrtimer_clockid_to_base(which_clock);
 
-       cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+       cpu_base = raw_cpu_ptr(&hrtimer_bases);
        *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
        return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
  */
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        ktime_t expires_next, now, entry_time, delta;
        int i, retries = 0;
 
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
        if (!hrtimer_hres_active())
                return;
 
-       td = &__get_cpu_var(tick_cpu_device);
+       td = this_cpu_ptr(&tick_cpu_device);
        if (td && td->evtdev)
                hrtimer_interrupt(td->evtdev);
 }
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
 void hrtimer_run_queues(void)
 {
        struct timerqueue_node *node;
-       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        struct hrtimer_clock_base *base;
        int index, gettime = 1;
 
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)
 
        local_irq_disable();
        old_base = &per_cpu(hrtimer_bases, scpu);
-       new_base = &__get_cpu_var(hrtimer_bases);
+       new_base = this_cpu_ptr(&hrtimer_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
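
Note: every hunk above applies the same mechanical conversion from the 'for-3.18-consistent-ops' series. The old __get_cpu_var() macro evaluated to the current CPU's instance of a per-CPU variable as an lvalue, so callers took its address with &; the replacement this_cpu_ptr() takes the variable's address and returns the per-CPU pointer directly, with raw_cpu_ptr() as the check-free variant replacing __raw_get_cpu_var(). The sketch below is not part of the patch; it uses a hypothetical per-CPU variable example_bases, and the old-style accessor only builds on kernels where __get_cpu_var() still exists.

#include <linux/percpu.h>

struct example_base {
	int active;
};

static DEFINE_PER_CPU(struct example_base, example_bases);

static void touch_this_cpu(void)
{
	/* Old style (pre-3.18): the macro yields the lvalue, so callers took &. */
	struct example_base *base = &__get_cpu_var(example_bases);

	base->active = 1;

	/* New style: pass the variable's address, get this CPU's pointer back. */
	base = this_cpu_ptr(&example_bases);
	base->active = 1;

	/* Variant without the preemption/debug checks, replacing __raw_get_cpu_var(). */
	base = raw_cpu_ptr(&example_bases);
	base->active = 1;
}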