/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>

/*
 * as from 2.5, kernels no longer have an "init_task" structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_CPU_BACKTRACE,
};

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}

	pgd_free(&init_mm, pgd);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();
	mb();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	complete(&cpu_died);

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	printk("CPU%u: Booted secondary processor\n", cpu);

	cpu_init();
	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	notify_cpu_starting(cpu);

#ifndef CONFIG_PLAT_RK
	calibrate_delay();
#endif

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	if (max_cpus > 1) {
		/*
		 * Enable the local timer or broadcast device for the
		 * boot CPU, but only if we have more than one CPU.
		 */
		percpu_timer_setup();

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		platform_smp_prepare_cpus(max_cpus);
	}
}
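
/*
 * smp_cross_call is installed by platform code via set_smp_cross_call()
 * and is used to raise an IPI on every CPU in the given mask.
 */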
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
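
/*
 * Human-readable IPI names for /proc/interrupts; the table is indexed
 * by (IPI number - IPI_TIMER), matching the ipi_irqs[] statistics.
 */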
static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
	sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

	return sum;
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
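
/*
 * Invoke this CPU's clockevent handler as if its local timer had fired.
 * Called for IPI_TIMER broadcast ticks and from do_local_timer().
 */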
static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	evt->event_handler(evt);
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		__inc_irq_stat(cpu, local_timer_irqs);
		irq_enter();
		ipi_timer();
		irq_exit();
	}

	set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
	unsigned int cpu;

	seq_printf(p, "%*s: ", prec, "LOC");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

	seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
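
/*
 * The "dummy" clockevent below has no hardware behind it: it is ticked
 * solely by IPI_TIMER broadcasts, so mode changes are a no-op.
 */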
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
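
/*
 * Register this CPU's tick source: the real local timer if the platform
 * provides one, otherwise the broadcast-fed dummy clockevent.
 */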
void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	if (local_timer_setup(evt))
		broadcast_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The generic clock events code purposely does not stop the local timer
 * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
 * manually here.
 */
static void percpu_timer_stop(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
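
/*
 * All-CPU backtrace support: smp_send_all_cpu_backtrace() asks every
 * other online CPU to dump its registers via IPI_CPU_BACKTRACE.
 */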
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpu_clear(this_cpu, backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}

/*
 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
 */
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
{
	if (cpu_isset(cpu, backtrace_mask)) {
		raw_spin_lock(&backtrace_lock);
		pr_warning("IPI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		raw_spin_unlock(&backtrace_lock);
		cpu_clear(cpu, backtrace_mask);
	}
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

	switch (ipinr) {
	case IPI_TIMER:
		irq_enter();
		ipi_timer();
		irq_exit();
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		ipi_cpu_backtrace(cpu, regs);
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}
	set_irq_regs(old_regs);
}
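
/* Kick a remote CPU so it runs through the scheduler via IPI_RESCHEDULE. */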
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
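
/*
 * Stop all other CPUs (e.g. on panic or shutdown): raise IPI_CPU_STOP
 * and poll for up to one second for them to go offline.
 */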
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask = cpu_online_map;
		cpu_clear(smp_processor_id(), mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}