/*
 * arch/arm/mach-tegra/cortex-a9.S
 *
 * CPU state save & restore routines for CPU hotplug
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/cache.h>
#include <asm/vfpmacros.h>
#include <asm/hardware/cache-l2x0.h>

#include <mach/iomap.h>
/*	.section ".cpuinit.text", "ax" */

#define TTB_FLAGS 0x6A		@ IRGN_WBWA, OC_RGN_WBWA, S, NOS
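/*
 * 0x6A = bit 6 (IRGN[0]: inner WBWA) | bit 5 (NOS) | bit 3 (RGN: outer WBWA)
 * | bit 1 (S): page-table walks are write-back write-allocate, shareable,
 * not outer-shareable.
 */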
/* spooled CPU context is 1KB / CPU */
#define CONTEXT_SIZE_BYTES_SHIFT 10
#define CONTEXT_SIZE_BYTES (1 << CONTEXT_SIZE_BYTES_SHIFT)
#define CTX_DIAGNOSTIC 40

#define CTX_CONTEXTIDR 80
#define CTX_TPIDRURW 84
#define CTX_TPIDRURO 88
#define CTX_TPIDRPRW 92
#define CTX_SVC_LR -1		@ stored on stack
#define CTX_SVC_SPSR 8

#define CTX_SYS_SP 96
#define CTX_SYS_LR 100
#define CTX_ABT_SPSR 112
#define CTX_ABT_SP 116
#define CTX_ABT_LR 120

#define CTX_UND_SPSR 128
#define CTX_UND_SP 132
#define CTX_UND_LR 136

#define CTX_IRQ_SPSR 144
#define CTX_IRQ_SP 148
#define CTX_IRQ_LR 152

#define CTX_FIQ_SPSR 160
#define CTX_FIQ_R8 164
#define CTX_FIQ_R9 168
#define CTX_FIQ_R10 172
#define CTX_FIQ_R11 176
#define CTX_FIQ_R12 180
#define CTX_FIQ_SP 184
#define CTX_FIQ_LR 188
/* context only relevant for master cpu */
#ifdef CONFIG_CACHE_L2X0
#define CTX_L2_CTRL 224
#define CTX_L2_AUX 228
#define CTX_L2_TAG_CTRL 232
#define CTX_L2_DAT_CTRL 236
#define CTX_L2_PREFETCH 240
#endif
#define CTX_VFP_REGS 256
#define CTX_VFP_SIZE (32 * 8)

#define CTX_CP14_REGS 512
#define CTX_CP14_DSCR 512
#define CTX_CP14_WFAR 516
#define CTX_CP14_VCR 520
#define CTX_CP14_CLAIM 524
/* Each of the following is 2 32-bit registers */
#define CTX_CP14_BKPT_0 528
#define CTX_CP14_BKPT_1 536
#define CTX_CP14_BKPT_2 544
#define CTX_CP14_BKPT_3 552
#define CTX_CP14_BKPT_4 560
#define CTX_CP14_BKPT_5 568

/* Each of the following is 2 32-bit registers */
#define CTX_CP14_WPT_0 576
#define CTX_CP14_WPT_1 584
#define CTX_CP14_WPT_2 592
#define CTX_CP14_WPT_3 600
#include "power-macros.S"
.macro ctx_ptr, rd, tmp
	cpu_id \tmp			@ \tmp = CPU index (cpu_id from power-macros.S)
	mov32 \rd, tegra_context_area
	ldr \rd, [\rd]
	add \rd, \rd, \tmp, lsl #(CONTEXT_SIZE_BYTES_SHIFT)
.endm
.macro translate, pa, va, tmp
	mov \tmp, #0x1000
	sub \tmp, \tmp, #1		@ \tmp = 0xFFF (page-offset mask; not an ARM immediate)
	bic \pa, \va, \tmp		@ page-align the VA
	mcr p15, 0, \pa, c7, c8, 1	@ ATS1CPW: translate VA, result latched in PAR
	mrc p15, 0, \pa, c7, c4, 0	@ read PAR
	bic \pa, \pa, \tmp		@ drop the PAR attribute bits
	and \tmp, \va, \tmp
	orr \pa, \pa, \tmp		@ reapply the page offset
.endm
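/*
 * translate converts a virtual address to physical using the CP15
 * address-translation ops: the ATS1CPW write (c7, c8, 1) performs a
 * stage-1 privileged lookup and latches the result in the PAR
 * (c7, c4, 0). The save path needs this because the spooled context
 * is later consumed by resume code running with the MMU off.
 */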
/*
 *	__cortex_a9_save(unsigned int mode)
 *
 *	 spools out the volatile processor state to memory, so that
 *	 the CPU may be safely powered down. does not preserve:
 *	 - CP15 c0 registers (except cache size select 2,c0/c0,0)
 *	 - CP15 c1 secure registers (c1/c1, 0-3)
 *	 - CP15 c5 fault status registers (c5/c0 0&1, c5/c1 0&1)
 *	 - CP15 c6 fault address registers (c6/c0 0&2)
 *	 - CP15 c9 performance monitor registers (c9/c12 0-5,
 *	   c9/c13 0-2, c9/c14 0-2)
 *	 - CP15 c10 TLB lockdown register (c10/c0, 0)
 *	 - CP15 c12 MVBAR (c12/c0, 1)
 *	 - CP15 c15 TLB lockdown registers
 */
	.align L1_CACHE_SHIFT
ENTRY(__cortex_a9_save)
	mrs r3, cpsr			@ r3 = original mode, restored on resume
	cps 0x13			@ save off svc registers
	mov r12, r0			@ r12 = requested power mode
	stmfd sp!, {r3-r12, lr}

	bic r2, sp, #(L1_CACHE_BYTES-1)
1:	mcr p15, 0, r2, c7, c14, 1	@ clean out dirty stack cachelines
	add r2, r2, #L1_CACHE_BYTES
	cmp r2, sp
	bne 1b
	dsb
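	/*
	 * The stack lines dirtied by the stmfd above must reach DRAM before
	 * this CPU's L1 vanishes with its power domain; c7, c14, 1
	 * (DCCIMVAC) cleans and invalidates one line per iteration.
	 */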
	/* zero-out context area */
	ctx_ptr r8, r9			@ r8 = this CPU's context area
	add r10, r8, #(CONTEXT_SIZE_BYTES)
	mov r0, #0
	mov r1, #0
	mov r2, #0
	mov r3, #0
	mov r4, #0
	mov r5, #0
	mov r6, #0
	mov r7, #0
	mov r9, r8
2:	stmia r9!, {r0-r7}
	cmp r9, r10
	blo 2b
	mov sp, r12			@ sp holds the power mode

	mrc p15, 0, r3, c1, c0, 2	@ cpacr
	mrc p15, 2, r0, c0, c0, 0	@ csselr
	mrc p15, 0, r1, c1, c0, 0	@ sctlr
	mrc p15, 0, r2, c1, c0, 1	@ actlr
	mrc p15, 0, r4, c15, c0, 0	@ pctlr
	add r9, r8, #CTX_CSSELR
	stmia r9, {r0-r2, r4}
	orr r2, r3, #0xF00000
	mcr p15, 0, r2, c1, c0, 2	@ enable access to FPU
	VFPFMRX r2, FPEXC		@ r2 = current FPEXC
	str r2, [r8, #CTX_FPEXC]
	mov r1, #0x40000000		@ enable access to FPU
	VFPFMXR FPEXC, r1
	VFPFMRX r1, FPSCR
	str r1, [r8, #CTX_FPSCR]

	add r9, r8, #CTX_VFP_REGS
	VFPFSTMIA r9, r12		@ save out (16 or 32)*8B of FPU registers
	VFPFMXR FPEXC, r2		@ restore original FPEXC
	mcr p15, 0, r3, c1, c0, 2	@ restore original CPACR
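	/*
	 * The VFP bank is only reachable with cp10/cp11 access granted in
	 * the CPACR (0xF00000) and FPEXC.EN (bit 30) set, so both are
	 * forced on around the spill and the saved values put back after.
	 */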
	mrc p15, 0, r0, c15, c0, 1	@ diag
	str r0, [r8, #CTX_DIAGNOSTIC]
	add r9, r8, #CTX_TTBR0
	mrc p15, 0, r0, c2, c0, 0	@ TTBR0
	mrc p15, 0, r1, c2, c0, 1	@ TTBR1
	mrc p15, 0, r2, c2, c0, 2	@ TTBCR
	mrc p15, 0, r3, c3, c0, 0	@ domain access control reg
	mrc p15, 0, r4, c7, c4, 0	@ PAR
	mrc p15, 0, r5, c10, c2, 0	@ PRRR
	mrc p15, 0, r6, c10, c2, 1	@ NMRR
	mrc p15, 0, r7, c12, c0, 0	@ VBAR
	stmia r9, {r0-r7}

	mrc p15, 0, r0, c13, c0, 1	@ CONTEXTIDR
	mrc p15, 0, r1, c13, c0, 2	@ TPIDRURW
	mrc p15, 0, r2, c13, c0, 3	@ TPIDRURO
	mrc p15, 0, r3, c13, c0, 4	@ TPIDRPRW
	add r9, r8, #CTX_CONTEXTIDR
	stmia r9, {r0-r3}
	add r9, r8, #CTX_SYS_SP
	cps 0x1f			@ System mode
	stmia r9, {sp, lr}

	cps 0x17			@ Abort mode
	mrs r12, spsr
	add r9, r8, #CTX_ABT_SPSR
	stmia r9, {r12, sp, lr}

	cps 0x12			@ IRQ mode
	mrs r12, spsr
	add r9, r8, #CTX_IRQ_SPSR
	stmia r9, {r12, sp, lr}

	cps 0x1b			@ Undefined mode
	mrs r12, spsr
	add r9, r8, #CTX_UND_SPSR
	stmia r9, {r12, sp, lr}

	add r1, r8, #CTX_FIQ_SPSR
	cps 0x11			@ FIQ mode
	mrs r7, spsr
	stmia r1, {r7-r12, sp, lr}

	cps 0x13			@ back to SVC
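	/*
	 * sp, lr and spsr are banked per mode, so each mode must be entered
	 * with cps to reach its copies. FIQ additionally banks r8-r12,
	 * which is why r1 (unbanked) carries the store address into FIQ
	 * mode and the stmia there captures FIQ's own r8-r12.
	 */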
	/* Save CP14 debug controller context */
	add r9, r8, #CTX_CP14_REGS
	mrc p14, 0, r0, c0, c1, 0	@ DSCR
	mrc p14, 0, r1, c0, c6, 0	@ WFAR
	mrc p14, 0, r2, c0, c7, 0	@ VCR
	mrc p14, 0, r3, c7, c9, 6	@ CLAIM
	stmia r9, {r0-r3}
	add r9, r8, #CTX_CP14_BKPT_0
	mrc p14, 0, r2, c0, c0, 4
	mrc p14, 0, r3, c0, c0, 5
	stmia r9!, {r2-r3}		@ BRKPT_0
	mrc p14, 0, r2, c0, c1, 4
	mrc p14, 0, r3, c0, c1, 5
	stmia r9!, {r2-r3}		@ BRKPT_1
	mrc p14, 0, r2, c0, c2, 4
	mrc p14, 0, r3, c0, c2, 5
	stmia r9!, {r2-r3}		@ BRKPT_2
	mrc p14, 0, r2, c0, c3, 4
	mrc p14, 0, r3, c0, c3, 5
	stmia r9!, {r2-r3}		@ BRKPT_3
	mrc p14, 0, r2, c0, c4, 4
	mrc p14, 0, r3, c0, c4, 5
	stmia r9!, {r2-r3}		@ BRKPT_4
	mrc p14, 0, r2, c0, c5, 4
	mrc p14, 0, r3, c0, c5, 5
	stmia r9!, {r2-r3}		@ BRKPT_5
	add r9, r8, #CTX_CP14_WPT_0
	mrc p14, 0, r2, c0, c0, 6
	mrc p14, 0, r3, c0, c0, 7
	stmia r9!, {r2-r3}		@ WPT_0
	mrc p14, 0, r2, c0, c1, 6
	mrc p14, 0, r3, c0, c1, 7
	stmia r9!, {r2-r3}		@ WPT_1
	mrc p14, 0, r2, c0, c2, 6
	mrc p14, 0, r3, c0, c2, 7
	stmia r9!, {r2-r3}		@ WPT_2
	mrc p14, 0, r2, c0, c3, 6
	mrc p14, 0, r3, c0, c3, 7
	stmia r9!, {r2-r3}		@ WPT_3
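	/*
	 * Each CP14 breakpoint/watchpoint is a value/control register pair:
	 * BVRn (op2=4) with BCRn (op2=5), WVRn (op2=6) with WCRn (op2=7),
	 * eight bytes per unit, matching the CTX_CP14_* offsets above.
	 */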
#ifdef CONFIG_CACHE_L2X0
	cpu_id r4
	cmp r4, #0			@ (assumed check) L2 state saved by the master CPU only
	bne __cortex_a9_save_clean_cache
	mov32 r4, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
	add r9, r8, #CTX_L2_CTRL
	ldr r0, [r4, #L2X0_CTRL]
	ldr r1, [r4, #L2X0_AUX_CTRL]
	ldr r2, [r4, #L2X0_TAG_LATENCY_CTRL]
	ldr r3, [r4, #L2X0_DATA_LATENCY_CTRL]
	ldr r4, [r4, #L2X0_PREFETCH_OFFSET]
	stmia r9, {r0-r4}
#endif
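	/*
	 * The PL310 captured above is a chip-wide resource, so its
	 * configuration is saved once rather than per-CPU like the
	 * processor state earlier in this routine.
	 */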
__cortex_a9_save_clean_cache:
	mov r10, r8			@ r10 = start of the context area
	add r9, r10, #(CONTEXT_SIZE_BYTES)
	add r9, r9, #(L1_CACHE_BYTES-1)
	bic r10, r10, #(L1_CACHE_BYTES-1)
	bic r9, r9, #(L1_CACHE_BYTES-1)

3:	mcr p15, 0, r10, c7, c10, 1	@ clean D-cache line by MVA
	add r10, r10, #L1_CACHE_BYTES
	cmp r10, r9
	blo 3b
	dsb
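	/*
	 * Like the stack, the freshly-written context area is cleaned to
	 * DRAM (c7, c10, 1 = DCCMVAC): the restore path reads it by
	 * physical address before this CPU rejoins the coherency domain.
	 */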
	translate r10, r8, r1		@ r10 = physical address of the context area

	mov r0, #0
	mcr p15, 0, r0, c1, c0, 1	@ exit coherency
	isb
	cpu_id r0
	mov32 r1, (TEGRA_ARM_PERIF_BASE-IO_CPU_PHYS+IO_CPU_VIRT+0xC)
	mov r3, r0, lsl #2
	mov r2, #0xf
	mov r2, r2, lsl r3
	str r2, [r1]			@ invalidate SCU tags for CPU
	dsb
	cmp sp, #0			@ sp = requested power mode (assumed: nonzero = hotplug)
	bne __put_cpu_in_reset
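	/*
	 * With ACTLR.SMP cleared above this CPU no longer participates in
	 * coherency; its SCU tag ways (one 4-bit nibble per CPU in the
	 * invalidate register at +0xC) are scrubbed so no stale tags are
	 * seen when the CPU next boots.
	 */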
ENDPROC(__cortex_a9_save)
/*
 *	__cortex_a9_restore
 *
 *	reloads the volatile CPU state from the context area
 *	the MMU should already be enabled using the secondary_data
 *	page tables for cpu_up before this function is called, and the
 *	CPU should be coherent with the SMP complex
 */
	.align L1_CACHE_SHIFT
ENTRY(__cortex_a9_restore)
	ctx_ptr r0, r9			@ r0 = saved context (r0 is not banked by FIQ)
	cps 0x11			@ FIQ mode
	add r1, r0, #CTX_FIQ_SPSR
	ldmia r1, {r7-r12, sp, lr}
	msr spsr_fsxc, r7

	cps 0x12			@ IRQ mode
	add r1, r0, #CTX_IRQ_SPSR
	ldmia r1, {r12, sp, lr}
	msr spsr_fsxc, r12

	cps 0x17			@ abort mode
	add r1, r0, #CTX_ABT_SPSR
	ldmia r1, {r12, sp, lr}
	msr spsr_fsxc, r12

	cps 0x1f			@ System mode
	add r1, r0, #CTX_SYS_SP
	ldmia r1, {sp, lr}

	cps 0x1b			@ Undefined mode
	add r1, r0, #CTX_UND_SPSR
	ldmia r1, {r12, sp, lr}
	msr spsr_fsxc, r12

	cps 0x13			@ back to SVC
	mov r8, r0			@ r8 = context area for the rest of the restore
	add r9, r8, #CTX_CSSELR
	ldmia r9, {r0-r3}		@ saved csselr, sctlr, actlr, pctlr
	mcr p15, 2, r0, c0, c0, 0	@ csselr
	mcr p15, 0, r1, c1, c0, 0	@ sctlr
	mcr p15, 0, r2, c1, c0, 1	@ actlr
	mcr p15, 0, r3, c15, c0, 0	@ pctlr
	add r9, r8, #CTX_TTBR0
	ldmia r9, {r0-r7}		@ saved TTBR0..VBAR block

	mcr p15, 0, r4, c7, c4, 0	@ PAR
	mcr p15, 0, r7, c12, c0, 0	@ VBAR
	mcr p15, 0, r3, c3, c0, 0	@ domain access control reg
	isb
	mcr p15, 0, r2, c2, c0, 2	@ TTBCR
	isb
	mcr p15, 0, r5, c10, c2, 0	@ PRRR
	isb
	mcr p15, 0, r6, c10, c2, 1	@ NMRR
	isb

	add r9, r8, #CTX_CONTEXTIDR
	ldmia r9, {r4-r7}		@ r4 = CONTEXTIDR, restored below with TTBR0
	mcr p15, 0, r5, c13, c0, 2	@ TPIDRURW
	mcr p15, 0, r6, c13, c0, 3	@ TPIDRURO
	mcr p15, 0, r7, c13, c0, 4	@ TPIDRPRW
	ldmia r8, {r5-r7, lr}		@ (assumed layout: r5 = SVC sp, r7 = SVC spsr, lr = saved CPACR)

	/* perform context switch to previous context */
	mov r9, #0
	mcr p15, 0, r9, c13, c0, 1	@ set reserved context
	isb
	mcr p15, 0, r0, c2, c0, 0	@ TTBR0
	isb
	mcr p15, 0, r4, c13, c0, 1	@ CONTEXTIDR
	isb
	mcr p15, 0, r1, c2, c0, 1	@ TTBR1
	isb

	mov r4, #0
	mcr p15, 0, r4, c8, c3, 0	@ invalidate TLB
	mcr p15, 0, r4, c7, c5, 6	@ flush BTAC
	mcr p15, 0, r4, c7, c5, 0	@ flush instruction cache
	dsb
	isb

	mov sp, r5			@ restore SVC sp
	msr spsr_fsxc, r7		@ restore SVC spsr
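	/*
	 * The switch through the reserved context above is the usual
	 * ASID-change dance: TTBR0/TTBR1 and the CONTEXTIDR changed under a
	 * live pipeline, so stale translations, branch targets, and fetched
	 * instructions are discarded before execution continues.
	 */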
	/* Restore CP14 debug controller context */
	add r9, r8, #CTX_CP14_REGS
	ldmia r9, {r0-r3}		@ r0 = DSCR, written last (below)
	mcr p14, 0, r1, c0, c6, 0	@ WFAR
	mcr p14, 0, r2, c0, c7, 0	@ VCR
	mcr p14, 0, r3, c7, c8, 6	@ CLAIM (write via CLAIMSET)
	add r9, r8, #CTX_CP14_BKPT_0
	ldmia r9!, {r2-r3}		@ BRKPT_0
	mcr p14, 0, r2, c0, c0, 4
	mcr p14, 0, r3, c0, c0, 5
	ldmia r9!, {r2-r3}		@ BRKPT_1
	mcr p14, 0, r2, c0, c1, 4
	mcr p14, 0, r3, c0, c1, 5
	ldmia r9!, {r2-r3}		@ BRKPT_2
	mcr p14, 0, r2, c0, c2, 4
	mcr p14, 0, r3, c0, c2, 5
	ldmia r9!, {r2-r3}		@ BRKPT_3
	mcr p14, 0, r2, c0, c3, 4
	mcr p14, 0, r3, c0, c3, 5
	ldmia r9!, {r2-r3}		@ BRKPT_4
	mcr p14, 0, r2, c0, c4, 4
	mcr p14, 0, r3, c0, c4, 5
	ldmia r9!, {r2-r3}		@ BRKPT_5
	mcr p14, 0, r2, c0, c5, 4
	mcr p14, 0, r3, c0, c5, 5
	add r9, r8, #CTX_CP14_WPT_0
	ldmia r9!, {r2-r3}		@ WPT_0
	mcr p14, 0, r2, c0, c0, 6
	mcr p14, 0, r3, c0, c0, 7
	ldmia r9!, {r2-r3}		@ WPT_1
	mcr p14, 0, r2, c0, c1, 6
	mcr p14, 0, r3, c0, c1, 7
	ldmia r9!, {r2-r3}		@ WPT_2
	mcr p14, 0, r2, c0, c2, 6
	mcr p14, 0, r3, c0, c2, 7
	ldmia r9!, {r2-r3}		@ WPT_3
	mcr p14, 0, r2, c0, c3, 6
	mcr p14, 0, r3, c0, c3, 7

	mcr p14, 0, r0, c0, c2, 2	@ DSCR (write via DBGDSCRext)
	orr r4, lr, #0xF00000
	mcr p15, 0, r4, c1, c0, 2	@ enable coproc access
	isb
	mov r5, #0x40000000
	VFPFMXR FPEXC, r5		@ enable FPU access
	add r9, r8, #CTX_VFP_REGS
	add r7, r8, #CTX_FPEXC
	VFPFLDMIA r9, r12		@ reload the (16 or 32)*8B of FPU registers
	ldr r0, [r7]			@ saved FPEXC
	ldr r1, [r8, #CTX_FPSCR]	@ saved FPSCR
	VFPFMXR FPSCR, r1
	VFPFMXR FPEXC, r0
	mcr p15, 0, lr, c1, c0, 2	@ cpacr (loaded before VFP)
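	/*
	 * Order matters here: coprocessor access (CPACR) and FPEXC.EN are
	 * forced on so the bank is writable, then the saved FPEXC and CPACR
	 * go back last, so a task that had VFP disabled still sees it
	 * disabled after resume.
	 */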
	ldr r9, [r8, #CTX_DIAGNOSTIC]
	mcr p15, 0, r9, c15, c0, 1	@ diag

	/* finally, restore the stack and return */
	ldmfd sp!, {r3-r12, lr}
	msr cpsr_fsxc, r3		@ restore original processor mode
	isb
	mov pc, lr
ENDPROC(__cortex_a9_restore)
/*
 *	__cortex_a9_l2x0_restart(bool invalidate)
 *
 *	Reconfigures the L2 cache following a power event.
 */
	.align L1_CACHE_SHIFT
ENTRY(__cortex_a9_l2x0_restart)
#ifdef CONFIG_CACHE_L2X0
	ctx_ptr r8, r9
	mov32 r9, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
	add r10, r8, #CTX_L2_CTRL
	ldmia r10, {r3-r7}		@ saved ctrl, aux, tag, data, prefetch
	str r5, [r9, #L2X0_TAG_LATENCY_CTRL]
	str r6, [r9, #L2X0_DATA_LATENCY_CTRL]
	str r7, [r9, #L2X0_PREFETCH_OFFSET]
	str r4, [r9, #L2X0_AUX_CTRL]
	mov r4, #0x2			@ L2X0_DYNAMIC_CLK_GATING_EN
	str r4, [r9, #L2X0_PWR_CTRL]
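	/*
	 * The PL310 aux, latency, and prefetch registers are writable only
	 * while the cache is disabled; L2X0_CTRL is therefore rewritten
	 * last, below, after the optional full invalidate.
	 */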
	cmp r0, #0			@ full invalidate requested?
	beq 4f
	mov r0, #0xff			@ way mask (assumes an 8-way PL310)
	str r0, [r9, #L2X0_INV_WAY]
1:	ldr r1, [r9, #L2X0_INV_WAY]
	tst r1, r0			@ poll until the ways finish invalidating
	bne 1b
4:	str r0, [r9, #L2X0_CACHE_SYNC]

	mov r5, #0
	mcr p15, 0, r5, c8, c3, 0	@ invalidate TLB
	mcr p15, 0, r5, c7, c5, 6	@ flush BTAC
	mcr p15, 0, r5, c7, c5, 0	@ flush instruction cache
	dsb
	isb

	str r3, [r9, #L2X0_CTRL]	@ finally, re-enable the L2
#endif
	b __cortex_a9_restore
ENDPROC(__cortex_a9_l2x0_restart)
	.align L1_CACHE_SHIFT
ENTRY(__shut_off_mmu)
	mrc p15, 0, r3, c1, c0, 0
	movw r2, #(1<<12) | (1<<11) | (1<<2) | (1<<0)
	bic r3, r3, r2
	mcr p15, 0, r3, c1, c0, 0
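	/*
	 * SCTLR bits cleared above: 12 (I: i-cache), 11 (Z: branch
	 * prediction), 2 (C: d-cache), and 0 (M: the MMU itself).
	 */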
	isb
	mov pc, r0			@ (assumed) jump to the physical continuation in r0
ENDPROC(__shut_off_mmu)
/*
 *	__invalidate_l1
 *
 *	Invalidates the L1 data cache (no clean) during initial boot of
 *	a secondary processor
 *
 *	Corrupted registers: r0-r6
 */
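/*
 *	Uses invalidate-by-set/way (DCISW, c7, c6, 2): each write kills one
 *	line, encoded as (way << WayShift) | (set << SetShift), where
 *	WayShift = clz(NumWays - 1) and SetShift = log2(line size in bytes).
 */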
__invalidate_l1:
	mov r0, #0
	mcr p15, 2, r0, c0, c0, 0	@ select the L1 data cache in CSSELR
	mrc p15, 1, r0, c0, c0, 0	@ read CCSIDR

	movw r1, #0x7fff
	and r2, r1, r0, lsr #13		@ NumSets - 1

	movw r1, #0x3ff
	and r3, r1, r0, lsr #3		@ NumWays - 1
	add r2, r2, #1			@ NumSets

	and r0, r0, #0x7
	add r0, r0, #4			@ SetShift

	clz r1, r3			@ WayShift
	add r4, r3, #1			@ NumWays
1:	sub r2, r2, #1			@ NumSets--
	mov r3, r4			@ Temp = NumWays
2:	subs r3, r3, #1			@ Temp--
	mov r5, r3, lsl r1
	mov r6, r2, lsl r0
	orr r5, r5, r6			@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	mcr p15, 0, r5, c7, c6, 2
	bgt 2b
	cmp r2, #0
	bgt 1b
	dsb
	mov pc, lr
ENDPROC(__invalidate_l1)
/*
 *	__invalidate_cpu_state
 *
 *	Invalidates volatile CPU state (SCU tags, caches, branch address
 *	arrays, exclusive monitor, etc.) so that instruction caching and
 *	branch prediction can be safely enabled as early as possible to
 *	improve performance
 */
ENTRY(__invalidate_cpu_state)
	clrex				@ clear the local exclusive monitor
	mov r0, #0
	mcr p15, 0, r0, c1, c0, 1	@ disable SMP, prefetch, broadcast
	isb
	mcr p15, 0, r0, c7, c5, 0	@ invalidate BTAC, i-cache
	mcr p15, 0, r0, c7, c5, 6	@ invalidate branch pred array
	mcr p15, 0, r0, c8, c7, 0	@ invalidate unified TLB
	dsb
	isb

	cpu_id r0
	cmp r0, #0			@ (assumed check) secondaries scrub their own SCU tags
	mov32 r1, (TEGRA_ARM_PERIF_BASE + 0xC)
	mov r3, r0, lsl #2
	mov r2, #0xf
	mov r2, r2, lsl r3
	strne r2, [r1]			@ invalidate SCU tags for CPU
	dsb

	mov r0, #0x1800			@ SCTLR.I | SCTLR.Z
	mcr p15, 0, r0, c1, c0, 0	@ enable branch prediction, i-cache
	b __invalidate_l1		@ invalidate data cache; returns to caller via lr
ENDPROC(__invalidate_cpu_state)
/*
 *	__return_to_virtual(unsigned long pgdir, void (*ctx_restore)(void))
 *
 *	Restores a CPU to the world of virtual addressing, using the
 *	specified page tables (which must ensure that a VA=PA mapping
 *	exists for the __enable_mmu function), and then jumps to
 *	ctx_restore to restore CPU context and return control to the OS
 */
	.align L1_CACHE_SHIFT
ENTRY(__return_to_virtual)
	orr r8, r0, #TTB_FLAGS
	mov lr, r1			@ "return" to ctx_restore
	mov r3, #0
	mcr p15, 0, r3, c2, c0, 2	@ TTB control register
	mcr p15, 0, r8, c2, c0, 1	@ load TTBR1
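	/*
	 * The pgdir, tagged with TTB_FLAGS, is loaded into TTBR1 here and
	 * into TTBR0 below; per the header comment it must provide a VA=PA
	 * mapping so execution survives the instant the MMU comes on.
	 */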
	mov r0, #0x3			@ (assumed) domain 0 = manager
	mcr p15, 0, r0, c3, c0, 0	@ domain access register

	mov32 r0, 0xff0a81a8		@ (assumed) Linux v7 default PRRR
	mov32 r1, 0x40e040e0		@ (assumed) Linux v7 default NMRR
	mcr p15, 0, r0, c10, c2, 0	@ PRRR
	mcr p15, 0, r1, c10, c2, 1	@ NMRR
	mrc p15, 0, r0, c1, c0, 0	@ current SCTLR
	orr r0, r0, #0x1		@ set SCTLR.M; __turn_mmu_on_again writes it
#ifdef CONFIG_ALIGNMENT_TRAP
	orr r0, r0, #0x2		@ SCTLR.A: alignment fault checking
#else
	bic r0, r0, #0x2
#endif

	mov r1, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		  domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		  domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr p15, 0, r1, c3, c0, 0	@ domain access register
	mcr p15, 0, r8, c2, c0, 0	@ TTBR0
	b __turn_mmu_on_again
ENDPROC(__return_to_virtual)
/*
 *	__turn_mmu_on_again
 *
 *	does exactly what it advertises: turns the MMU on, again
 *	jumps to the *virtual* address in lr after the MMU is enabled.
 */
	.align L1_CACHE_SHIFT
ENTRY(__turn_mmu_on_again)
	mov r0, r0
	mcr p15, 0, r0, c1, c0, 0	@ write SCTLR: the MMU comes on
	mrc p15, 0, r3, c0, c0, 0	@ read MIDR back
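	/*
	 * As in the kernel's __turn_mmu_on, the MIDR read-back and the
	 * filler moves below serialize the SCTLR write, guaranteeing the
	 * MMU is on before the jump through r3 lands at a virtual address.
	 */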
	mov r3, r3
	mov r3, lr
	mov pc, r3			@ jump to the *virtual* lr
ENDPROC(__turn_mmu_on_again)