Merge remote branch 'origin/x86/cpu' into x86/amd-nb
author    H. Peter Anvin <hpa@linux.intel.com>
          Fri, 1 Oct 2010 23:18:11 +0000 (16:18 -0700)
committer H. Peter Anvin <hpa@linux.intel.com>
          Fri, 1 Oct 2010 23:18:11 +0000 (16:18 -0700)
arch/x86/include/asm/cpufeature.h
arch/x86/kernel/tsc.c

index c6fbb7b430d167c7663f99901b2c653ec6556911,bffeab7eab976db6cefe3aa4e9267295ab7cc927..0450a2842f481634903809b59f4aeb346b95a573
  #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
  #define X86_FEATURE_OSVW      (6*32+ 9) /* OS Visible Workaround */
  #define X86_FEATURE_IBS               (6*32+10) /* Instruction Based Sampling */
- #define X86_FEATURE_SSE5      (6*32+11) /* SSE-5 */
+ #define X86_FEATURE_XOP               (6*32+11) /* extended AVX instructions */
  #define X86_FEATURE_SKINIT    (6*32+12) /* SKINIT/STGI instructions */
  #define X86_FEATURE_WDT               (6*32+13) /* Watchdog timer */
+ #define X86_FEATURE_LWP               (6*32+15) /* Light Weight Profiling */
+ #define X86_FEATURE_FMA4      (6*32+16) /* 4 operands MAC instructions */
  #define X86_FEATURE_NODEID_MSR        (6*32+19) /* NodeId MSR */
+ #define X86_FEATURE_TBM               (6*32+21) /* trailing bit manipulations */
+ #define X86_FEATURE_TOPOEXT   (6*32+22) /* topology extensions CPUID leafs */
  
  /*
   * Auxiliary flags: Linux defined - For features scattered in various
  #define X86_FEATURE_LBRV      (8*32+ 6) /* AMD LBR Virtualization support */
  #define X86_FEATURE_SVML      (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
  #define X86_FEATURE_NRIPS     (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+ #define X86_FEATURE_TSCRATEMSR  (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+ #define X86_FEATURE_VMCBCLEAN   (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+ #define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
+ #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
+ #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
+ #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
  
  /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
  #define X86_FEATURE_FSGSBASE  (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
@@@ -296,7 -307,6 +307,7 @@@ extern const char * const x86_power_fla
  
  #endif /* CONFIG_X86_64 */
  
 +#if __GNUC__ >= 4
  /*
   * Static testing of CPU features.  Used the same as boot_cpu_has().
   * These are only valid after alternatives have run, but will statically
   * patch the target code for additional performance.
   */
  static __always_inline __pure bool __static_cpu_has(u16 bit)
  {
 -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
                asm goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
  #endif
  }
  
 -#if __GNUC__ >= 4
  #define static_cpu_has(bit)                                   \
  (                                                             \
        __builtin_constant_p(boot_cpu_has(bit)) ?               \
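
For context on the hunk above: each X86_FEATURE_* value packs a capability
word index and a bit position as word*32+bit (so X86_FEATURE_XOP, renamed
from X86_FEATURE_SSE5 here, is word 6, bit 11), and static_cpu_has() lets
the runtime test be patched by alternatives at boot. A minimal usage
sketch, with a hypothetical caller that is not part of this diff:

    /* Hypothetical example, not in this commit.  With gcc >= 4.5 the
     * asm goto variant of __static_cpu_has() compiles this test down to
     * a single jmp that the alternatives machinery patches at boot.
     */
    static void xop_demo(void)
    {
    	if (static_cpu_has(X86_FEATURE_XOP))
    		; /* fast path: CPU advertises AMD XOP */
    	else
    		; /* generic fallback */
    }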
diff --combined arch/x86/kernel/tsc.c
index 26a863a9c2a815ec797b6211cd3de5e0b9cbed8e,13b6a6cc77f201aa30c9bbaedc469d31e10139cd..4496315eb2243e8b04040563112fea538a8816be
@@@ -626,44 -626,6 +626,44 @@@ static void set_cyc2ns_scale(unsigned l
        local_irq_restore(flags);
  }
  
 +static unsigned long long cyc2ns_suspend;
 +
 +void save_sched_clock_state(void)
 +{
 +      if (!sched_clock_stable)
 +              return;
 +
 +      cyc2ns_suspend = sched_clock();
 +}
 +
 +/*
 + * Even on processors with an invariant TSC, the TSC gets reset in some of
 + * the ACPI system sleep states. And on some systems the BIOS seems to
 + * reinit the TSC to an arbitrary value (still sync'd across cpus) during
 + * resume from such sleep states. To cope with this, recompute the
 + * cyc2ns_offset for each cpu so that sched_clock() continues from the
 + * point where it was left off during suspend.
 + */
 +void restore_sched_clock_state(void)
 +{
 +      unsigned long long offset;
 +      unsigned long flags;
 +      int cpu;
 +
 +      if (!sched_clock_stable)
 +              return;
 +
 +      local_irq_save(flags);
 +
 +      __get_cpu_var(cyc2ns_offset) = 0;
 +      offset = cyc2ns_suspend - sched_clock();
 +
 +      for_each_possible_cpu(cpu)
 +              per_cpu(cyc2ns_offset, cpu) = offset;
 +
 +      local_irq_restore(flags);
 +}
 +
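
The offset arithmetic above is easiest to see as a sketch, assuming
sched_clock() here reduces to the per-cpu cyc2ns conversion used elsewhere
in this file, roughly (tsc * cyc2ns_scale >> CYC2NS_SCALE_FACTOR) +
cyc2ns_offset:

    /* Sketch only; assumes the cyc2ns form above.
     * Zeroing this cpu's offset makes sched_clock() return the raw,
     * offset-free reading, so
     *     offset = cyc2ns_suspend - raw_sched_clock;
     * and after the per-cpu stores every cpu satisfies
     *     sched_clock() == raw + offset == cyc2ns_suspend (at that instant),
     * i.e. the clock continues from the value saved at suspend even if
     * the BIOS reset the TSC across the sleep state.
     */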
  #ifdef CONFIG_CPU_FREQ
  
  /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
@@@ -892,60 -854,6 +892,6 @@@ static void __init init_tsc_clocksource
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
  }
  
- #ifdef CONFIG_X86_64
- /*
-  * calibrate_cpu is used on systems with fixed rate TSCs to determine
-  * processor frequency
-  */
- #define TICK_COUNT 100000000
- static unsigned long __init calibrate_cpu(void)
- {
-       int tsc_start, tsc_now;
-       int i, no_ctr_free;
-       unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-       unsigned long flags;
-       for (i = 0; i < 4; i++)
-               if (avail_to_resrv_perfctr_nmi_bit(i))
-                       break;
-       no_ctr_free = (i == 4);
-       if (no_ctr_free) {
-               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-                    "cpu_khz value may be incorrect.\n");
-               i = 3;
-               rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               rdmsrl(MSR_K7_PERFCTR3, pmc3);
-       } else {
-               reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-       local_irq_save(flags);
-       /* start measuring cycles, incrementing from 0 */
-       wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-       wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-       rdtscl(tsc_start);
-       do {
-               rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-               tsc_now = get_cycles();
-       } while ((tsc_now - tsc_start) < TICK_COUNT);
-       local_irq_restore(flags);
-       if (no_ctr_free) {
-               wrmsrl(MSR_K7_EVNTSEL3, 0);
-               wrmsrl(MSR_K7_PERFCTR3, pmc3);
-               wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-       } else {
-               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-       }
-       return pmc_now * tsc_khz / (tsc_now - tsc_start);
- }
- #else
- static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
- #endif
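
The scaling at the end of the removed calibrate_cpu() is plain
cross-multiplication: event 0x76 (CPU_CLK_UNHALTED on AMD) counts core
cycles, the TSC advances at tsc_khz, so cpu_khz = pmc_delta * tsc_khz /
tsc_delta. A worked example with made-up numbers:

    /* Illustrative numbers only, not from the source:
     * pmc_now             = 110000000   core cycles counted (from 0)
     * tsc_now - tsc_start = 100000000   TSC ticks over the same window
     * tsc_khz             = 2000000     (2 GHz TSC reference)
     * cpu_khz = 110000000UL * 2000000 / 100000000 = 2200000, i.e. 2.2 GHz.
     * The 64-bit unsigned long keeps the intermediate product from
     * overflowing; this routine was CONFIG_X86_64-only.
     */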
  void __init tsc_init(void)
  {
        u64 lpj;
                return;
        }
  
-       if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
-                       (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
-               cpu_khz = calibrate_cpu();
        printk("Detected %lu.%03lu MHz processor.\n",
                        (unsigned long)cpu_khz / 1000,
                        (unsigned long)cpu_khz % 1000);