/*
 *  linux/arch/arm/mach-tegra/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  Copyright (C) 2009 Palm
 *  Copyright (C) 2010 NVIDIA Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/localtimer.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <mach/iomap.h>

#include "power.h"	/* CONTEXT_SIZE_BYTES and LP2 save/restore entry points */
extern void tegra_secondary_startup(void);

static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
extern void __cortex_a9_restore(void);
extern void __shut_off_mmu(void);
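
/*
 * __cortex_a9_restore and __shut_off_mmu are low-level (assembly) entry
 * points used on the hotplug/LP2 resume path; they must be covered by the
 * suspend page table built in create_suspend_pgtable() below so they stay
 * reachable while the MMU is being turned on or off.
 */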
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(struct completion, cpu_killed);
extern void tegra_hotplug_startup(void);
#endif

static DECLARE_BITMAP(cpu_init_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_init_mask = to_cpumask(cpu_init_bits);
#define cpu_init_map (*(cpumask_t *)cpu_init_mask)
#define EVP_CPU_RESET_VECTOR \
	(IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
	(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)
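
/*
 * EVP_CPU_RESET_VECTOR holds the address a CPU jumps to when it comes out
 * of reset; boot_secondary() points it at the startup trampoline and then
 * polls it to detect that the secondary has started.  In the clock/reset
 * controller, CLK_CPU_CMPLX bit (8 + cpu) gates the per-CPU clock, and the
 * RST_CPU_CMPLX_SET/CLR registers assert/de-assert the per-CPU reset lines
 * (0x1111 << cpu).
 */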
unsigned long tegra_pgd_phys;	/* pgd used by hotplug & LP2 bootup */
static pgd_t *tegra_pgd;
void *tegra_context_area = NULL;
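
/*
 * tegra_context_area holds the per-CPU state save buffers
 * (CONTEXT_SIZE_BYTES per core, allocated in smp_prepare_cpus());
 * tegra_pgd/tegra_pgd_phys describe the minimal set of section mappings
 * (built by create_suspend_pgtable()) used while a core's MMU is being
 * turned on or off during hotplug and LP2 bootup.
 */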
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();
	gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
	cpu_set(cpu, cpu_init_map);
	INIT_COMPLETION(per_cpu(cpu_killed, cpu));
#endif
	spin_unlock(&boot_lock);
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long old_boot_vector;
	unsigned long boot_vector;
	unsigned long timeout;
	u32 reg;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* set the reset vector to point to the secondary_startup routine */
#ifdef CONFIG_HOTPLUG_CPU
	if (cpumask_test_cpu(cpu, cpu_init_mask))
		boot_vector = virt_to_phys(tegra_hotplug_startup);
	else
#endif
		boot_vector = virt_to_phys(tegra_secondary_startup);

	old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
	writel(boot_vector, EVP_CPU_RESET_VECTOR);

	/* enable cpu clock on cpu */
	reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
	writel(reg & ~(1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);

	/* take the cpu out of reset */
	reg = 0x1111<<cpu;
	writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);

	/* unhalt the cpu in the flow controller */
	writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8*(cpu-1));

	/* wait for the secondary to rewrite the reset vector, indicating
	 * that it has started */
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
			break;
		udelay(10);
	}

	/* put the old boot vector back */
	writel(old_boot_vector, EVP_CPU_RESET_VECTOR);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
	unsigned int i, ncores = scu_get_core_count(scu_base);

	for (i = 0; i < ncores; i++)
		cpu_set(i, cpu_possible_map);
}
static int create_suspend_pgtable(void)
{
	int i;
	pmd_t *pmd;
	/* arrays of virtual-to-physical mappings which must be
	 * present to safely boot hotplugged / LP2-idled CPUs.
	 * tegra_hotplug_startup (hotplug reset vector) is mapped
	 * VA=PA so that the translation post-MMU is the same as
	 * pre-MMU, IRAM is mapped VA=PA so that SDRAM self-refresh
	 * can safely disable the MMU */
	unsigned long addr_v[] = {
		PHYS_OFFSET,
		IO_IRAM_PHYS,
		(unsigned long)tegra_context_area,
		(unsigned long)virt_to_phys(tegra_hotplug_startup),
		(unsigned long)__cortex_a9_restore,
		(unsigned long)virt_to_phys(__shut_off_mmu),
	};
	unsigned long addr_p[] = {
		PHYS_OFFSET,
		IO_IRAM_PHYS,
		(unsigned long)virt_to_phys(tegra_context_area),
		(unsigned long)virt_to_phys(tegra_hotplug_startup),
		(unsigned long)virt_to_phys(__cortex_a9_restore),
		(unsigned long)virt_to_phys(__shut_off_mmu),
	};
	unsigned int flags = PMD_TYPE_SECT | PMD_SECT_AP_WRITE |
		PMD_SECT_WBWA | PMD_SECT_S;

	tegra_pgd = pgd_alloc(&init_mm);
	if (!tegra_pgd)
		return -ENOMEM;

	/* install one section mapping per entry and clean it to memory so
	 * the pre-MMU resume code can walk the table */
	for (i = 0; i < ARRAY_SIZE(addr_p); i++) {
		unsigned long v = addr_v[i];
		pmd = pmd_offset(tegra_pgd + pgd_index(v), v);
		*pmd = __pmd((addr_p[i] & PGDIR_MASK) | flags);
		flush_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}

	tegra_pgd_phys = virt_to_phys(tegra_pgd);
	__cpuc_flush_dcache_area(&tegra_pgd_phys,
		sizeof(tegra_pgd_phys));
	outer_clean_range(__pa(&tegra_pgd_phys),
		__pa(&tegra_pgd_phys+1));

	__cpuc_flush_dcache_area(&tegra_context_area,
		sizeof(tegra_context_area));
	outer_clean_range(__pa(&tegra_context_area),
		__pa(&tegra_context_area+1));

	return 0;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = scu_get_core_count(scu_base);
	unsigned int cpu = smp_processor_id();
	int i;

	smp_store_cpu_info(cpu);

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	tegra_context_area = kzalloc(CONTEXT_SIZE_BYTES * ncores, GFP_KERNEL);

	if (tegra_context_area && create_suspend_pgtable()) {
		kfree(tegra_context_area);
		tegra_context_area = NULL;
	}

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(i) {
		init_completion(&per_cpu(cpu_killed, i));
	}
#endif

	/*
	 * Initialise the SCU if there are more than one CPU and let
	 * them know where to start. Note that, on modern versions of
	 * MILO, the "poke" doesn't actually do anything until each
	 * individual core is sent a soft interrupt to get it out of
	 * WFI
	 */
	if (max_cpus > 1) {
		percpu_timer_setup();
		scu_enable(scu_base);
	}
}
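
/*
 * CPU hotplug: platform_cpu_die() runs on the dying CPU, signals its
 * cpu_killed completion and parks itself; platform_cpu_kill() runs on a
 * surviving CPU, waits (with a timeout) for that handshake, then makes
 * sure the dead CPU is held in reset and gates its clock under boot_lock.
 */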
#ifdef CONFIG_HOTPLUG_CPU

extern void vfp_sync_state(struct thread_info *thread);

void __cpuinit secondary_start_kernel(void);

int platform_cpu_kill(unsigned int cpu)
{
	u32 reg;
	int e;

	e = wait_for_completion_timeout(&per_cpu(cpu_killed, cpu), 100);
	printk(KERN_NOTICE "CPU%u: %s shutdown\n", cpu, (e) ? "clean" : "forced");

	if (e) {
		do {
			reg = readl(CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
			cpu_relax();
		} while (!(reg & (1<<cpu)));
	} else {
		writel(0x1111<<cpu, CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
		/* put flow controller in WAIT_EVENT mode */
		writel(2<<29, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE)+0x14 + 0x8*(cpu-1));
	}
	spin_lock(&boot_lock);
	reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
	writel(reg | (1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
	spin_unlock(&boot_lock);
	return e;
}
void platform_cpu_die(unsigned int cpu)
{
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
		BUG();
	}

	complete(&per_cpu(cpu_killed, cpu));
	flush_cache_all();
	__cortex_a9_save(0);	/* save state and park this core */
	/* return happens from __cortex_a9_restore */
	writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
}
int platform_cpu_disable(unsigned int cpu)
{
	/* we don't allow CPU 0 to be shutdown (it is still too special,
	 * e.g. clock tick interrupts) */
	if (unlikely(!tegra_context_area))
		return -ENXIO;

	return cpu == 0 ? -EPERM : 0;
}
#endif