Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Dec 2012 04:01:33 +0000 (20:01 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Dec 2012 04:01:33 +0000 (20:01 -0800)
Pull x86 timer update from Ingo Molnar:
 "This tree includes HPET fixes and also implements a calibration-free,
  TSC match driven APIC timer interrupt mode: 'TSC deadline mode'
  supported in SandyBridge and later CPUs."

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: hpet: Fix inverted return value check in arch_setup_hpet_msi()
  x86: hpet: Fix masking of MSI interrupts
  x86: apic: Use tsc deadline for oneshot when available
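
For readers unfamiliar with the deadline mode referenced by the "x86: apic: Use tsc
deadline for oneshot when available" commit, a rough kernel-context sketch of how a
TSC-deadline one-shot is armed follows. This is illustrative only and is not the merged
patch: arm_tsc_deadline_oneshot(), delta_ticks and LVT_TIMER_TSCDEADLINE are names
invented here (the mode bits follow the Intel SDM encoding), while MSR_IA32_TSC_DEADLINE,
APIC_LVTT, LOCAL_TIMER_VECTOR, X86_FEATURE_TSC_DEADLINE_TIMER, apic_write(), rdtscll()
and wrmsrl() are standard x86 kernel interfaces of this era; <asm/apic.h> and <asm/msr.h>
are assumed to be included.

    #define LVT_TIMER_TSCDEADLINE  (2 << 17)  /* LVT timer mode field: TSC-deadline (Intel SDM) */

    static void arm_tsc_deadline_oneshot(u64 delta_ticks)
    {
            u64 tsc;

            /* Without the CPU feature, stick with the programmable LAPIC timer. */
            if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
                    return;

            /* Put the LVT timer entry into TSC-deadline mode, unmasked. */
            apic_write(APIC_LVTT, LVT_TIMER_TSCDEADLINE | LOCAL_TIMER_VECTOR);

            /*
             * The interrupt fires when the TSC reaches the absolute deadline,
             * so no separate LAPIC timer calibration is required.
             */
            rdtscll(tsc);
            wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + delta_ticks);
    }

The point of the mode is that expiry is programmed as an absolute TSC value, which is
why the pull message can describe the resulting one-shot path as calibration-free.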

Documentation/kernel-parameters.txt
arch/x86/kernel/apic/io_apic.c

index 5190f1706414b942587bac9dcc6386b0f08c691a,4aa9ca0de63c6653e263f3ab9d7172b272ae5494..28bd0f0e32c517359c946029fc814c9cc0d4a474
@@@ -1304,6 -1304,10 +1304,10 @@@ bytes respectively. Such letter suffixe
        lapic           [X86-32,APIC] Enable the local APIC even if BIOS
                        disabled it.
  
+       lapic=          [X86,APIC] "notscdeadline" Do not use the TSC deadline
+                       value for the LAPIC timer one-shot implementation; fall
+                       back to the programmable timer unit in the LAPIC.
        lapic_timer_c2_ok       [X86,APIC] trust the local apic timer
                        in C2 power state.
  
  
        nox2apic        [X86-64,APIC] Do not enable x2APIC mode.
  
 +      cpu0_hotplug    [X86] Turn on CPU0 hotplug feature when
 +                      CONFIG_BOOTPARAM_HOTPLUG_CPU0 is off.
 +                      Some features depend on CPU0. Known dependencies are:
 +                      1. Resume from suspend/hibernate depends on CPU0.
 +                      Suspend/hibernate will fail if CPU0 is offline and you
 +                      need to online CPU0 before suspend/hibernate.
 +                      2. PIC interrupts also depend on CPU0. CPU0 can't be
 +                      removed if a PIC interrupt is detected.
 +                      Poweroff/reboot is also reported to depend on CPU0 on
 +                      some machines, although no such issue has been observed
 +                      so far with CPU0 offline on the machines tested.
 +                      If the dependencies are under your control, you can
 +                      turn on cpu0_hotplug.
 +
        nptcg=          [IA-64] Override max number of concurrent global TLB
                        purges which is reported from either PAL_VM_SUMMARY or
                        SAL PALO.
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/blockdev/ramdisk.txt.
  
 +      rcu_nocbs=      [KNL,BOOT]
 +                      In kernels built with CONFIG_RCU_NOCB_CPU=y, set
 +                      the specified list of CPUs to be no-callback CPUs.
 +                      Invocation of these CPUs' RCU callbacks will
 +                      be offloaded to "rcuoN" kthreads created for
 +                      that purpose.  This reduces OS jitter on the
 +                      offloaded CPUs, which can be useful for HPC and
 +                      real-time workloads.  It can also improve energy
 +                      efficiency for asymmetric multiprocessors.
 +
 +      rcu_nocbs_poll  [KNL,BOOT]
 +                      Rather than requiring that offloaded CPUs
 +                      (specified by rcu_nocbs= above) explicitly
 +                      awaken the corresponding "rcuoN" kthreads,
 +                      make these kthreads poll for callbacks.
 +                      This improves the real-time response for the
 +                      offloaded CPUs by relieving them of the need to
 +                      wake up the corresponding kthread, but degrades
 +                      energy efficiency by requiring that the kthreads
 +                      periodically wake up to do the polling.
 +
        rcutree.blimit= [KNL,BOOT]
                        Set maximum number of finished RCU callbacks to process
                        in one batch.
                        to facilitate early boot debugging.
                        See also Documentation/trace/events.txt
  
 +      trace_options=[option-list]
 +                      [FTRACE] Enable or disable tracer options at boot.
 +                      The option-list is a comma delimited list of options
 +                      that can be enabled or disabled just as if you were
 +                      to echo the option name into
 +
 +                          /sys/kernel/debug/tracing/trace_options
 +
 +                      For example, to enable stacktrace option (to dump the
 +                      stack trace of each event), add to the command line:
 +
 +                            trace_options=stacktrace
 +
 +                      See also Documentation/trace/ftrace.txt "trace options"
 +                      section.
 +
        transparent_hugepage=
                        [KNL]
                        Format: [always|madvise|never]
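
As a usage note on the kernel-parameters.txt additions above: all of the new options are
plain boot command-line parameters. An illustrative, hypothetical command-line fragment
combining them (CPU list chosen arbitrarily) might read:

      lapic=notscdeadline rcu_nocbs=1-3 rcu_nocbs_poll trace_options=stacktrace

rcu_nocbs= takes a standard CPU list, while rcu_nocbs_poll, like cpu0_hotplug, is a bare
flag with no value.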
index 70aa621df118d69d08e3b4654678318c58c6dbf2,b134f0b7ed25cc877f2b83e96a17103694cc4029..b739d398bb29bc6951e777827cd56c982cedb452
@@@ -234,11 -234,11 +234,11 @@@ int __init arch_early_irq_init(void
                zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node);
                /*
                 * For legacy IRQ's, start with assigning irq0 to irq15 to
 -               * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
 +               * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
                 */
                if (i < legacy_pic->nr_legacy_irqs) {
                        cfg[i].vector = IRQ0_VECTOR + i;
 -                      cpumask_set_cpu(0, cfg[i].domain);
 +                      cpumask_setall(cfg[i].domain);
                }
        }
  
@@@ -1141,8 -1141,7 +1141,8 @@@ __assign_irq_vector(int irq, struct irq
                         * allocation for the members that are not used anymore.
                         */
                        cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
 -                      cfg->move_in_progress = 1;
 +                      cfg->move_in_progress =
 +                         cpumask_intersects(cfg->old_domain, cpu_online_mask);
                        cpumask_and(cfg->domain, cfg->domain, tmp_mask);
                        break;
                }
@@@ -1173,9 -1172,8 +1173,9 @@@ next
                current_vector = vector;
                current_offset = offset;
                if (cfg->vector) {
 -                      cfg->move_in_progress = 1;
                        cpumask_copy(cfg->old_domain, cfg->domain);
 +                      cfg->move_in_progress =
 +                         cpumask_intersects(cfg->old_domain, cpu_online_mask);
                }
                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
@@@ -1243,6 -1241,12 +1243,6 @@@ void __setup_vector_irq(int cpu
                cfg = irq_get_chip_data(irq);
                if (!cfg)
                        continue;
 -              /*
 -               * If it is a legacy IRQ handled by the legacy PIC, this cpu
 -               * will be part of the irq_cfg's domain.
 -               */
 -              if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
 -                      cpumask_set_cpu(cpu, cfg->domain);
  
                if (!cpumask_test_cpu(cpu, cfg->domain))
                        continue;
@@@ -1352,6 -1356,16 +1352,6 @@@ static void setup_ioapic_irq(unsigned i
        if (!IO_APIC_IRQ(irq))
                return;
  
 -      /*
 -       * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
 -       * can handle this irq and the apic driver is finialized at this point,
 -       * update the cfg->domain.
 -       */
 -      if (irq < legacy_pic->nr_legacy_irqs &&
 -          cpumask_equal(cfg->domain, cpumask_of(0)))
 -              apic->vector_allocation_domain(0, cfg->domain,
 -                                             apic->target_cpus());
 -
        if (assign_irq_vector(irq, cfg, apic->target_cpus()))
                return;
  
@@@ -2185,11 -2199,9 +2185,11 @@@ static int ioapic_retrigger_irq(struct 
  {
        struct irq_cfg *cfg = data->chip_data;
        unsigned long flags;
 +      int cpu;
  
        raw_spin_lock_irqsave(&vector_lock, flags);
 -      apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
 +      cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
 +      apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
  
        return 1;
@@@ -3305,8 -3317,9 +3305,9 @@@ int arch_setup_hpet_msi(unsigned int ir
        int ret;
  
        if (irq_remapping_enabled) {
-               if (!setup_hpet_msi_remapped(irq, id))
-                       return -1;
+               ret = setup_hpet_msi_remapped(irq, id);
+               if (ret)
+                       return ret;
        }
  
        ret = msi_compose_msg(NULL, irq, &msg, id);
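
A note on the arch_setup_hpet_msi() hunk above: setup_hpet_msi_remapped() follows the
usual kernel convention of returning 0 on success and a negative errno on failure (which
is what the replacement code relies on), so the old check returned an error precisely
when the call had succeeded, and carried on as if nothing had failed when it actually
had. A minimal before/after sketch of the pattern, using a hypothetical remap_setup()
in place of the real function:

    /* Inverted check (the bug): a 0/success return trips the early exit
     * with a bare -1, while a real failure falls through undetected. */
    if (!remap_setup(irq, id))
            return -1;

    /* Corrected pattern (what the hunk switches to): propagate the errno
     * only when remap_setup() actually reports an error. */
    ret = remap_setup(irq, id);
    if (ret)
            return ret;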