/*
 *  linux/arch/arm/mach-tegra/platsmp.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 *  Copyright (C) 2009 Palm
 *  All Rights Reserved
 *
 *  Copyright (C) 2010 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/localtimer.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <mach/iomap.h>

#include "power.h"

extern void tegra_secondary_startup(void);

static DEFINE_SPINLOCK(boot_lock);
static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
extern void __cortex_a9_restore(void);
extern void __shut_off_mmu(void);

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(struct completion, cpu_killed);
extern void tegra_hotplug_startup(void);
#endif

static DECLARE_BITMAP(cpu_init_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_init_mask = to_cpumask(cpu_init_bits);
#define cpu_init_map (*(cpumask_t *)cpu_init_mask)
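
/*
 * cpu_init_mask tracks which secondary CPUs have completed their first
 * boot; boot_secondary() restarts such cores through the hotplug entry
 * point instead of the cold-boot one.
 */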

#define EVP_CPU_RESET_VECTOR \
        (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x100)
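
/*
 * CPU-complex clock and reset registers in the clock-and-reset
 * controller: CLK_CPU_CMPLX carries per-core clock-stop bits, while
 * the RST_CPU_CMPLX _SET/_CLR pair asserts and deasserts the per-core
 * reset lines (register offsets as used throughout this file).
 */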
#define CLK_RST_CONTROLLER_CLK_CPU_CMPLX \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x4c)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x340)
#define CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x344)

unsigned long tegra_pgd_phys;  /* pgd used by hotplug & LP2 bootup */
static pgd_t *tegra_pgd;
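/*
 * One CONTEXT_SIZE_BYTES save slot per core, allocated in
 * smp_prepare_cpus() and presumably consumed by the
 * __cortex_a9_save()/__cortex_a9_restore() assembly.
 */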
void *tegra_context_area = NULL;

void __cpuinit platform_secondary_init(unsigned int cpu)
{
        trace_hardirqs_off();
        gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x100);
        /*
         * Synchronise with the boot thread.
         */
        spin_lock(&boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
        cpu_set(cpu, cpu_init_map);
        INIT_COMPLETION(per_cpu(cpu_killed, cpu));
#endif
        spin_unlock(&boot_lock);
}

int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
        unsigned long old_boot_vector;
        unsigned long boot_vector;
        unsigned long timeout;
        u32 reg;

        /*
         * set synchronisation state between this boot processor
         * and the secondary one
         */
        spin_lock(&boot_lock);

        /* set the reset vector to point to the secondary_startup routine */
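        /*
         * A core that has completed its first boot is resumed through
         * tegra_hotplug_startup, which restores the context saved by
         * __cortex_a9_save(); a cold core takes the full
         * tegra_secondary_startup path.
         */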
#ifdef CONFIG_HOTPLUG_CPU
        if (cpumask_test_cpu(cpu, cpu_init_mask))
                boot_vector = virt_to_phys(tegra_hotplug_startup);
        else
#endif
                boot_vector = virt_to_phys(tegra_secondary_startup);

        smp_wmb();

        old_boot_vector = readl(EVP_CPU_RESET_VECTOR);
        writel(boot_vector, EVP_CPU_RESET_VECTOR);

        /* enable the cpu clock: bit 8+cpu of CLK_CPU_CMPLX appears to
         * be the per-core clock-stop enable, so clear it */
        reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
        writel(reg & ~(1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);

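        /*
         * 0x1111 << cpu appears to select all four reset lines routed
         * to this core (the CPU, DE, watchdog and debug resets sit
         * four bits apart in RST_CPU_CMPLX); writing them to the _CLR
         * register deasserts the resets.
         */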
        reg = 0x1111<<cpu;
        writel(reg, CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR);

        /* unhalt the cpu */
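        /* (the flow controller's HALT_CPUn_EVENTS registers appear to
         *  start at offset 0x14 with a stride of 8; writing 0 releases
         *  the core. cpu is never 0 on this path, so the cpu-1
         *  indexing is safe.) */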
        writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8*(cpu-1));

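        /*
         * The woken core is expected to modify the reset vector early
         * in its startup path; that change is the boot handshake
         * polled for below. Give it one second before carrying on
         * regardless.
         */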
        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (readl(EVP_CPU_RESET_VECTOR) != boot_vector)
                        break;
                udelay(10);
        }

        /* put the old boot vector back */
        writel(old_boot_vector, EVP_CPU_RESET_VECTOR);

        /*
         * now that the secondary core is starting up, let it run its
         * calibrations, then wait for it to finish
         */
        spin_unlock(&boot_lock);

        return 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */
void __init smp_init_cpus(void)
{
        unsigned int i, ncores = scu_get_core_count(scu_base);

        for (i = 0; i < ncores; i++)
                cpu_set(i, cpu_possible_map);
}

static int create_suspend_pgtable(void)
{
        int i;
        pmd_t *pmd;
        /* arrays of virtual-to-physical mappings which must be
         * present to safely boot hotplugged / LP2-idled CPUs.
         * tegra_hotplug_startup (the hotplug reset vector) is mapped
         * VA=PA so that the translation after the MMU comes on is the
         * same as before it; IRAM is mapped VA=PA so that the SDRAM
         * self-refresh code can safely disable the MMU */
        unsigned long addr_v[] = {
                PHYS_OFFSET,
                IO_IRAM_PHYS,
                (unsigned long)tegra_context_area,
                (unsigned long)virt_to_phys(tegra_hotplug_startup),
                (unsigned long)__cortex_a9_restore,
                (unsigned long)virt_to_phys(__shut_off_mmu),
        };
        unsigned long addr_p[] = {
                PHYS_OFFSET,
                IO_IRAM_PHYS,
                (unsigned long)virt_to_phys(tegra_context_area),
                (unsigned long)virt_to_phys(tegra_hotplug_startup),
                (unsigned long)virt_to_phys(__cortex_a9_restore),
                (unsigned long)virt_to_phys(__shut_off_mmu),
        };
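        /* map each entry as a kernel-writable, shared, write-back
         * write-allocate 1MB section */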
        unsigned int flags = PMD_TYPE_SECT | PMD_SECT_AP_WRITE |
                PMD_SECT_WBWA | PMD_SECT_S;

        tegra_pgd = pgd_alloc(&init_mm);
        if (!tegra_pgd)
                return -ENOMEM;

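        /*
         * Each address is rounded down to its 1MB section; clean the
         * new PMD entries out to the outer cache, since the resume
         * path presumably walks this page table before the caches are
         * re-enabled.
         */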
        for (i = 0; i < ARRAY_SIZE(addr_p); i++) {
                unsigned long v = addr_v[i];
                pmd = pmd_offset(tegra_pgd + pgd_index(v), v);
                *pmd = __pmd((addr_p[i] & PGDIR_MASK) | flags);
                flush_pmd_entry(pmd);
                outer_clean_range(__pa(pmd), __pa(pmd + 1));
        }

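        /*
         * tegra_pgd_phys and tegra_context_area are read by the resume
         * assembly while the MMU is off, so push their current values
         * out of both cache levels.
         */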
        tegra_pgd_phys = virt_to_phys(tegra_pgd);
        __cpuc_flush_dcache_area(&tegra_pgd_phys,
                sizeof(tegra_pgd_phys));
        outer_clean_range(__pa(&tegra_pgd_phys),
                __pa(&tegra_pgd_phys+1));

        __cpuc_flush_dcache_area(&tegra_context_area,
                sizeof(tegra_context_area));
        outer_clean_range(__pa(&tegra_context_area),
                __pa(&tegra_context_area+1));

        return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int ncores = scu_get_core_count(scu_base);
        unsigned int cpu = smp_processor_id();
        int i;

        smp_store_cpu_info(cpu);

        /*
         * are we trying to boot more cores than exist?
         */
        if (max_cpus > ncores)
                max_cpus = ncores;

        tegra_context_area = kzalloc(CONTEXT_SIZE_BYTES * ncores, GFP_KERNEL);

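        /*
         * If either allocation fails, leave tegra_context_area NULL;
         * platform_cpu_disable() then refuses CPU hotplug, since the
         * suspend page table is needed to bring a core back online.
         */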
        if (tegra_context_area && create_suspend_pgtable()) {
                kfree(tegra_context_area);
                tegra_context_area = NULL;
        }

        /*
         * Initialise the present map, which describes the set of CPUs
         * actually populated at the present time.
         */
        for (i = 0; i < max_cpus; i++)
                set_cpu_present(i, true);

#ifdef CONFIG_HOTPLUG_CPU
        for_each_present_cpu(i) {
                init_completion(&per_cpu(cpu_killed, i));
        }
#endif

        /*
         * If there is more than one CPU, set up the boot CPU's local
         * timer and enable the SCU so the secondaries can join in
         * coherency; each one is released individually later, in
         * boot_secondary().
         */
        if (max_cpus > 1) {
                percpu_timer_setup();
                scu_enable(scu_base);
        }
}

#ifdef CONFIG_HOTPLUG_CPU

extern void vfp_sync_state(struct thread_info *thread);

void __cpuinit secondary_start_kernel(void);

int platform_cpu_kill(unsigned int cpu)
{
        unsigned int reg;
        int e;

        e = wait_for_completion_timeout(&per_cpu(cpu_killed, cpu), 100);
        printk(KERN_NOTICE "CPU%u: %s shutdown\n", cpu, (e) ? "clean" : "forced");

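        /*
         * On a clean shutdown the dying core puts itself into reset
         * from __cortex_a9_save(); poll until its reset status reads
         * back as set (reading the _SET register appears to return the
         * current reset state). Otherwise force the core into reset
         * and park its flow controller in WAIT_EVENT mode.
         */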
        if (e) {
                do {
                        reg = readl(CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
                        cpu_relax();
                } while (!(reg & (1<<cpu)));
        } else {
                writel(0x1111<<cpu, CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET);
                /* put flow controller in WAIT_EVENT mode */
                writel(2<<29, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + 0x14 + 0x8*(cpu-1));
        }
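        /* with the core held in reset, gate its clock as well */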
        spin_lock(&boot_lock);
        reg = readl(CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
        writel(reg | (1<<(8+cpu)), CLK_RST_CONTROLLER_CLK_CPU_CMPLX);
        spin_unlock(&boot_lock);
        return e;
}

void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
        unsigned int this_cpu = hard_smp_processor_id();

        if (cpu != this_cpu) {
                printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
                           this_cpu, cpu);
                BUG();
        }
#endif

        gic_cpu_exit(0);
        barrier();
        complete(&per_cpu(cpu_killed, cpu));
        flush_cache_all();
        barrier();
        __cortex_a9_save(0);

        /* return happens from __cortex_a9_restore */
        barrier();
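        /* (this write moves EVP_CPU_RESET_VECTOR away from the boot
         *  vector, which appears to be the handshake boot_secondary()
         *  polls for when bringing the core back online) */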
        writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
}

int platform_cpu_disable(unsigned int cpu)
{
        /*
         * we don't allow CPU 0 to be shutdown (it is still too special
         * e.g. clock tick interrupts)
         */
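        /* without a context save area a core cannot be brought back */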
        if (unlikely(!tegra_context_area))
                return -ENXIO;

        return cpu == 0 ? -EPERM : 0;
}
#endif