/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
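
/*
 * secondary_data is consumed by the secondary core's assembly boot
 * path (head.S) before the MMU is on, which is why __cpu_up() below
 * has to clean it out to main memory instead of relying on cache
 * coherency.
 */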

static DECLARE_COMPLETION(cpu_running);

int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
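
	/*
	 * The secondary core starts with the MMU and caches off, so the
	 * fields written above must be visible in RAM: the L1 flush
	 * pushes them out of the inner cache, outer_clean_range() out
	 * of any outer (e.g. L2) cache.
	 */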

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	return ret;
}
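
/*
 * boot_secondary() is the platform hook that actually wakes the core,
 * typically by releasing it from reset or kicking it out of a holding
 * pen; the core then enters secondary_start_kernel() below, which
 * completes cpu_running once it has marked itself online.
 */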

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	RCU_NONIDLE(complete(&cpu_died));

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
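
/*
 * The stack pointer handed to the asm above is THREAD_SIZE - 8 from
 * the base of the stack area - the same offset THREAD_START_SP gives
 * when __cpu_up() hands an idle thread's stack to a secondary.
 */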
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;

	store_cpu_topology(cpuid);
}

static void percpu_timer_setup(void);

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	printk("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
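
/*
 * Worked example for the arithmetic above, assuming HZ = 100 and a
 * single online CPU with loops_per_jiffy = 2490368:
 *   bogosum / (500000/HZ)        = 2490368 / 5000       = 498
 *   (bogosum / (5000/HZ)) % 100  = (2490368 / 50) % 100 = 7
 * which prints "SMP: Total of 1 processors activated (498.07 BogoMIPS)."
 */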

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in platform_smp_prepare_cpus() if
		 * present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
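
/*
 * The two hooks above are the arch half of the generic cross-call
 * API: e.g. smp_call_function_single(cpu, fn, info, wait) queues fn
 * in core code, then ends up here to raise IPI_CALL_FUNC_SINGLE on
 * the target CPU, which runs the queue from handle_IPI() below.
 */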

static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
};
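
/*
 * The S() wrapper subtracts IPI_TIMER because IPI numbers start at 2
 * in enum ipi_msg_type while ipi_irqs[] and this name table are
 * indexed from 0; handle_IPI() applies the same offset when it
 * accounts an IPI.
 */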

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif

static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}

static struct local_timer_ops *lt_ops;

#ifdef CONFIG_LOCAL_TIMERS
int local_timer_register(struct local_timer_ops *ops)
{
	if (!is_smp() || !setup_max_cpus)
		return -ENXIO;

	if (lt_ops)
		return -EBUSY;

	lt_ops = ops;
	return 0;
}
#endif

static void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (!lt_ops || lt_ops->setup(evt))
		broadcast_timer_setup(evt);
}
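
/*
 * Timer selection above, in order of preference: a platform driver
 * registered through local_timer_register() gets first shot at each
 * CPU via lt_ops->setup(); if no driver is registered, or its setup
 * fails, the CPU falls back to the dummy broadcast-driven clockevent
 * from broadcast_timer_setup().
 */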

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * ourselves here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	if (lt_ops)
		lt_ops->stop(evt);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
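
/*
 * A stopped CPU parks in the cpu_relax() loop above with IRQs and
 * FIQs masked and never returns; where hotplug support exists,
 * smp_send_stop() may then take it down for good via
 * platform_cpu_kill().
 */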

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
	unsigned int cpu;
	for_each_cpu(cpu, mask)
		platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");

	smp_kill_cpus(&mask);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}
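
/*
 * cpufreq_scale(ref, ref_freq, new_freq) rescales the reference
 * loops_per_jiffy linearly with frequency: e.g. a reference of
 * 2490368 captured at 500000 kHz becomes 2490368 * 1000000 / 500000
 * = 4980736 after a switch to 1000000 kHz, keeping udelay() roughly
 * calibrated without a full recalibration. (Values illustrative.)
 */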

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif /* CONFIG_CPU_FREQ */