x86/asm/tsc: Rename native_read_tsc() to rdtsc()
author Andy Lutomirski <luto@kernel.org>
Thu, 25 Jun 2015 16:44:07 +0000 (18:44 +0200)
committer Ingo Molnar <mingo@kernel.org>
Mon, 6 Jul 2015 13:23:28 +0000 (15:23 +0200)
Now that there is no paravirt TSC, the "native" prefix is
inappropriate. The function does RDTSC, so give it the obvious
name: rdtsc().

Suggested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm ML <kvm@vger.kernel.org>
Link: http://lkml.kernel.org/r/fd43e16281991f096c1e4d21574d9e1402c62d39.1434501121.git.luto@kernel.org
[ Ported it to v4.2-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
25 files changed:
arch/x86/boot/compressed/aslr.c
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/include/asm/msr.h
arch/x86/include/asm/pvclock.h
arch/x86/include/asm/stackprotector.h
arch/x86/include/asm/tsc.h
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/espfix_64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/trace_clock.c
arch/x86/kernel/tsc.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
drivers/cpufreq/intel_pstate.c
drivers/input/gameport/gameport.c
drivers/input/joystick/analog.c
drivers/net/hamradio/baycom_epp.c
drivers/thermal/intel_powerclamp.c
tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c

diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index ea33236190b182ab75b11cc8521abfd93dd0a522..6a9b96b4624d2ed93586b44d05848738bbd80178 100644
@@ -82,7 +82,7 @@ static unsigned long get_random_long(void)
 
        if (has_cpuflag(X86_FEATURE_TSC)) {
                debug_putstr(" RDTSC");
-               raw = native_read_tsc();
+               raw = rdtsc();
 
                random ^= raw;
                use_i8254 = false;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 972b488ac16ad23e309debe8afbcb82d8f44054b..0340d93c18ca46e9bde33713e3e63a9281e18757 100644
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
-       ret = (cycle_t)native_read_tsc();
+       ret = (cycle_t)rdtsc();
 
        last = gtod->cycle_last;
 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c89ed6ceed02de5ad0f9176855fb85fabc76cda9..ff0c120dafe5416cfe3dab2917402f2669fe77ac 100644
@@ -109,7 +109,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
        DECLARE_ARGS(val, low, high);
 
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 2bd69d62c6233eaec23197178da16b9ce9d39b0f..5c490db62e32133c69b3cb8f87fe08e4a91b7cbc 100644
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-       u64 delta = native_read_tsc() - src->tsc_timestamp;
+       u64 delta = rdtsc() - src->tsc_timestamp;
        return pvclock_scale_delta(delta, src->tsc_to_system_mul,
                                   src->tsc_shift);
 }
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index bc5fa2af112e791fc8f090e293828e83a783566e..58505f01962f31f80f2e99c5b0bb9e5b2c8405b8 100644
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
         * on during the bootup the random pool has true entropy too.
         */
        get_random_bytes(&canary, sizeof(canary));
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        canary += tsc + (tsc << 32UL);
 
        current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b4883902948bb426aed764f22438bda0bd2ce036..3df7675debcffdc51d549cd22a496a4773e5fdd0 100644
@@ -26,7 +26,7 @@ static inline cycles_t get_cycles(void)
                return 0;
 #endif
 
-       return native_read_tsc();
+       return rdtsc();
 }
 
 extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 25efa534c4e4d67e2022064514f52bcea21bf605..222a57076039f25686087274ef6a42d7db9f91fc 100644
@@ -263,7 +263,7 @@ static int apbt_clocksource_register(void)
 
        /* Verify whether apbt counter works */
        t1 = dw_apb_clocksource_read(clocksource_apbt);
-       start = native_read_tsc();
+       start = rdtsc();
 
        /*
         * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@ static int apbt_clocksource_register(void)
         */
        do {
                rep_nop();
-               now = native_read_tsc();
+               now = rdtsc();
        } while ((now - start) < 200000UL);
 
        /* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
        old = dw_apb_clocksource_read(clocksource_apbt);
        old += loop;
 
-       t1 = native_read_tsc();
+       t1 = rdtsc();
 
        do {
                new = dw_apb_clocksource_read(clocksource_apbt);
        } while (new < old);
 
-       t2 = native_read_tsc();
+       t2 = rdtsc();
 
        shift = 5;
        if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 51af1ed1ae2e714792971db9e1d5e105171fd755..0d71cd9b4a50405b01ea09b69e3512618264b7aa 100644
@@ -457,7 +457,7 @@ static int lapic_next_deadline(unsigned long delta,
 {
        u64 tsc;
 
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
        return 0;
 }
@@ -592,7 +592,7 @@ static void __init lapic_cal_handler(struct clock_event_device *dev)
        unsigned long pm = acpi_pm_read_early();
 
        if (cpu_has_tsc)
-               tsc = native_read_tsc();
+               tsc = rdtsc();
 
        switch (lapic_cal_loops++) {
        case 0:
@@ -1209,7 +1209,7 @@ void setup_local_APIC(void)
        long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
        if (cpu_has_tsc)
-               tsc = native_read_tsc();
+               tsc = rdtsc();
 
        if (disable_apic) {
                disable_ioapic_support();
@@ -1293,7 +1293,7 @@ void setup_local_APIC(void)
                }
                if (queued) {
                        if (cpu_has_tsc && cpu_khz) {
-                               ntsc = native_read_tsc();
+                               ntsc = rdtsc();
                                max_loops = (cpu_khz << 10) - (ntsc - tsc);
                        } else
                                max_loops--;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index a69710db6112f7b01b33459a3693e54778457e5a..51ad2af84a72b01afe4d855a0794594a40f91981 100644
@@ -125,10 +125,10 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
 
                n = K6_BUG_LOOP;
                f_vide = vide;
-               d = native_read_tsc();
+               d = rdtsc();
                while (n--)
                        f_vide();
-               d2 = native_read_tsc();
+               d2 = rdtsc();
                d = d2-d;
 
                if (d > 20*K6_BUG_LOOP)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a5283d2d00944c40b4f06455ad65b36ed1048b78..96cceccd11b440993c194b3026ffcaa51ad0b49a 100644
@@ -125,7 +125,7 @@ void mce_setup(struct mce *m)
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       m->tsc = native_read_tsc();
+       m->tsc = rdtsc();
        /* We hope get_seconds stays lockless */
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -1784,7 +1784,7 @@ static void collect_tscs(void *data)
 {
        unsigned long *cpu_tsc = (unsigned long *)data;
 
-       cpu_tsc[smp_processor_id()] = native_read_tsc();
+       cpu_tsc[smp_processor_id()] = rdtsc();
 }
 
 static int mce_apei_read_done;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 334a2a9c034d3f53c2ab1bd163fdc3c78e315095..67315cd0132cfe4df78b1c67414be930449df022 100644
@@ -110,7 +110,7 @@ static void init_espfix_random(void)
         */
        if (!arch_get_random_long(&rand)) {
                /* The constant is an arbitrary large prime */
-               rand = native_read_tsc();
+               rand = rdtsc();
                rand *= 0xc345c6b72fd16123UL;
        }
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index cc390fe69b71a38d188a9d3419ebf3c5e274fd4d..f75c5908c7a6a9d1391752c975995d6d6d67bb22 100644
@@ -735,7 +735,7 @@ static int hpet_clocksource_register(void)
 
        /* Verify whether hpet counter works */
        t1 = hpet_readl(HPET_COUNTER);
-       start = native_read_tsc();
+       start = rdtsc();
 
        /*
         * We don't know the TSC frequency yet, but waiting for
@@ -745,7 +745,7 @@ static int hpet_clocksource_register(void)
         */
        do {
                rep_nop();
-               now = native_read_tsc();
+               now = rdtsc();
        } while ((now - start) < 200000UL);
 
        if (t1 == hpet_readl(HPET_COUNTER)) {
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index bd8f4d41bd563104797dfefbb55cbdd72e0b8f48..67efb8c96fc4c6a73ad60380a816a56efffa3aba 100644
@@ -15,7 +15,7 @@ u64 notrace trace_clock_x86_tsc(void)
        u64 ret;
 
        rdtsc_barrier();
-       ret = native_read_tsc();
+       ret = rdtsc();
 
        return ret;
 }
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e66f5dcaeb632be5b1025f4a9dd8ddbb6676c995..21d6e04e3e825939c8e843a9890b099149f27e2c 100644
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 
        data = cyc2ns_write_begin(cpu);
 
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
        ns_now = cycles_2_ns(tsc_now);
 
        /*
@@ -290,7 +290,7 @@ u64 native_sched_clock(void)
        }
 
        /* read the Time Stamp Counter: */
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
 
        /* return the value in ns */
        return cycles_2_ns(tsc_now);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e38bf9861d4d6349eb721264e8cb18..2f0ade48614feb43b6221de5f2ecdc594279f0a8 100644
@@ -1172,7 +1172,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
-       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1240,7 +1240,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
                local_irq_save(flags);
 
                now = apic->lapic_timer.timer.base->get_time();
-               guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+               guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
                if (likely(tscdeadline > guest_tsc)) {
                        ns = (tscdeadline - guest_tsc) * 1000000ULL;
                        do_div(ns, this_tsc_khz);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a626e18d11965ed5ad1461c329bee8..8dfbad7a2c44e8545d8530723e7e34b9a29b3ecb 100644
@@ -1080,7 +1080,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = svm_scale_tsc(vcpu, native_read_tsc());
+       tsc = svm_scale_tsc(vcpu, rdtsc());
 
        return target_tsc - tsc;
 }
@@ -3079,7 +3079,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, native_read_tsc());
+                       svm_scale_tsc(vcpu, rdtsc());
 
                break;
        }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4fa1ccad7bebc985c9910bc8d10c01d63b68dfeb..10d69a6df14fbd652d2fbe8f8eba329e0654cfce 100644
@@ -2236,7 +2236,7 @@ static u64 guest_read_tsc(void)
 {
        u64 host_tsc, tsc_offset;
 
-       host_tsc = native_read_tsc();
+       host_tsc = rdtsc();
        tsc_offset = vmcs_read64(TSC_OFFSET);
        return host_tsc + tsc_offset;
 }
@@ -2317,7 +2317,7 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-       return target_tsc - native_read_tsc();
+       return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f771058cfb5cc4376e89b8870423803fa572b599..dfa97139282db4eb02735e9d8f6a23c13135f5a0 100644
@@ -1455,7 +1455,7 @@ static cycle_t read_tsc(void)
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
-       ret = (cycle_t)native_read_tsc();
+       ret = (cycle_t)rdtsc();
 
        last = pvclock_gtod_data.clock.cycle_last;
 
@@ -1646,7 +1646,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 1;
        }
        if (!use_master_clock) {
-               host_tsc = native_read_tsc();
+               host_tsc = rdtsc();
                kernel_ns = get_kernel_ns();
        }
 
@@ -2810,7 +2810,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
                s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-                               native_read_tsc() - vcpu->arch.last_host_tsc;
+                               rdtsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
@@ -2838,7 +2838,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->vcpu_put(vcpu);
        kvm_put_guest_fpu(vcpu);
-       vcpu->arch.last_host_tsc = native_read_tsc();
+       vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -6623,7 +6623,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                hw_breakpoint_restore();
 
        vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          native_read_tsc());
+                                                          rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
@@ -7437,7 +7437,7 @@ int kvm_arch_hardware_enable(void)
        if (ret != 0)
                return ret;
 
-       local_tsc = native_read_tsc();
+       local_tsc = rdtsc();
        stable = !check_tsc_unstable();
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 35115f3786a908e5b487b7f976c06ded5b13ab14..f24bc59ab0a0ac8261769ac49d6875025d1db480 100644
@@ -55,10 +55,10 @@ static void delay_tsc(unsigned long __loops)
        preempt_disable();
        cpu = smp_processor_id();
        rdtsc_barrier();
-       bclock = native_read_tsc();
+       bclock = rdtsc();
        for (;;) {
                rdtsc_barrier();
-               now = native_read_tsc();
+               now = rdtsc();
                if ((now - bclock) >= loops)
                        break;
 
@@ -80,7 +80,7 @@ static void delay_tsc(unsigned long __loops)
                        loops -= (now - bclock);
                        cpu = smp_processor_id();
                        rdtsc_barrier();
-                       bclock = native_read_tsc();
+                       bclock = rdtsc();
                }
        }
        preempt_enable();
@@ -100,7 +100,7 @@ void use_tsc_delay(void)
 int read_current_timer(unsigned long *timer_val)
 {
        if (delay_fn == delay_tsc) {
-               *timer_val = native_read_tsc();
+               *timer_val = rdtsc();
                return 0;
        }
        return -1;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720b710454795d8d7e83c235c2fccfc..7c56d7eaa671471a29fde79b95a837a1d1c2c357 100644
@@ -765,7 +765,7 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
-       tsc = native_read_tsc();
+       tsc = rdtsc();
        local_irq_restore(flags);
 
        cpu->last_sample_time = cpu->sample.time;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index abc0cb22e7504cca309c343c41750f03eb67a010..4a2a9e370be74a20d51fd651f86e497ddac94157 100644
@@ -149,9 +149,9 @@ static int old_gameport_measure_speed(struct gameport *gameport)
 
        for(i = 0; i < 50; i++) {
                local_irq_save(flags);
-               t1 = native_read_tsc();
+               t1 = rdtsc();
                for (t = 0; t < 50; t++) gameport_read(gameport);
-               t2 = native_read_tsc();
+               t2 = rdtsc();
                local_irq_restore(flags);
                udelay(i * 10);
                if (t2 - t1 < tx) tx = t2 - t1;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index f871b4f0005680de78d8c92634d6939214201dce..6f8b084e13d0724e77c68c249a227fdb6377a02d 100644
@@ -143,7 +143,7 @@ struct analog_port {
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x)    do { if (cpu_has_tsc) x = (unsigned int)native_read_tsc(); else x = get_time_pit(); } while (0)
+#define GET_TIME(x)    do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y)     (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME      (cpu_has_tsc?"TSC":"PIT")
 static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@ static unsigned int get_time_pit(void)
         return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x)    do { x = (unsigned int)native_read_tsc(); } while (0)
+#define GET_TIME(x)    do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 44e5c3b5e0af698ffcdbda940fab03238fd94c19..72c9f1f352b4ec686a073b48f8df52d6245f771f 100644
@@ -638,7 +638,7 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x)                                                \
 ({                                                                \
        if (cpu_has_tsc)                                          \
-               x = (unsigned int)native_read_tsc();              \
+               x = (unsigned int)rdtsc();                \
 })
 #else /* __i386__ */
 #define GETTICK(x)
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index ab13448defcfd4925c38ab936d4368c1ab629faf..2ac0c704bcb85b5884365d54485c57383952b81a 100644
@@ -340,7 +340,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 
        /* check result for the last window */
        msr_now = pkg_state_counter();
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
 
        /* calculate pkg cstate vs tsc ratio */
        if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@ static void poll_pkg_cstate(struct work_struct *dummy)
        u64 val64;
 
        msr_now = pkg_state_counter();
-       tsc_now = native_read_tsc();
+       tsc_now = rdtsc();
        jiffies_now = jiffies;
 
        /* calculate pkg cstate vs tsc ratio */
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index f02b0c0bff9b43f8708217a8124c70d0b04730be..6ff8383f2941de141db863be784aae04f5cbe797 100644
@@ -81,11 +81,11 @@ static int __init cpufreq_test_tsc(void)
 
        printk(KERN_DEBUG "start--> \n");
        then = read_pmtmr();
-       then_tsc = native_read_tsc();
+       then_tsc = rdtsc();
        for (i=0;i<20;i++) {
                mdelay(100);
                now = read_pmtmr();
-               now_tsc = native_read_tsc();
+               now_tsc = rdtsc();
                diff = (now - then) & 0xFFFFFF;
                diff_tsc = now_tsc - then_tsc;
                printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);