2 * arch/arm/mach-tegra/tegra2_save.S
4 * CPU state save & restore routines for CPU hotplug
6 * Copyright (c) 2010, NVIDIA Corporation.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
23 #include <linux/linkage.h>
24 #include <linux/init.h>
26 #include <asm/assembler.h>
27 #include <asm/domain.h>
28 #include <asm/ptrace.h>
29 #include <asm/cache.h>
30 #include <asm/vfpmacros.h>
31 #include <asm/memory.h>
32 #include <asm/hardware/cache-l2x0.h>
34 #include <mach/iomap.h>
39 /* .section ".cpuinit.text", "ax"*/
41 #define TTB_FLAGS 0x6A @ IRGN_WBWA, OC_RGN_WBWA, S, NOS
44 #define EMC_ADR_CFG 0x10
45 #define EMC_REFRESH 0x70
47 #define EMC_SELF_REF 0xe0
48 #define EMC_REQ_CTRL 0x2b0
49 #define EMC_EMC_STATUS 0x2b4
52 #define PMC_CTRL_BFI_SHIFT 8
53 #define PMC_CTRL_BFI_WIDTH 9
54 #define PMC_SCRATCH38 0x134
55 #define PMC_SCRATCH41 0x140
57 #define CLK_RESET_CCLK_BURST 0x20
58 #define CLK_RESET_CCLK_DIVIDER 0x24
59 #define CLK_RESET_SCLK_BURST 0x28
60 #define CLK_RESET_SCLK_DIVIDER 0x2c
62 #define CLK_RESET_PLLC_BASE 0x80
63 #define CLK_RESET_PLLM_BASE 0x90
64 #define CLK_RESET_PLLP_BASE 0xa0
66 #define FLOW_CTRL_HALT_CPU_EVENTS 0x0
68 #include "power-macros.S"
/* emc_device_mask: build the EMC_EMC_STATUS mask covering every populated
 * DRAM device, so callers can poll "all devices in self-refresh".
 *   \rd   = output: status bit mask (bit 8, or bits 8-9)
 *   \base = base address of the EMC controller (virtual or physical,
 *           depending on caller context)
 * NOTE(review): the compare that sets the flags for moveq/movne, and the
 * closing .endm, are not visible in this view — presumably a test of the
 * device-count field read from EMC_ADR_CFG; confirm against the full file. */
70 .macro emc_device_mask, rd, base
71 ldr \rd, [\base, #EMC_ADR_CFG]
73 moveq \rd, #(0x1<<8) @ just 1 device
74 movne \rd, #(0x3<<8) @ 2 devices
79 * __tear_down_master( r8 = context_pa, sp = power state )
81 * Set the clock burst policy to the selected wakeup source
82 * Enable CPU power-request mode in the PMC
83 * Put the CPU in wait-for-event mode on the flow controller
84 * Trigger the PMC state machine to put the CPU in reset
86 ENTRY(__tear_down_master)
88 #ifdef CONFIG_CACHE_L2X0
89 /* clean out the dirtied L2 lines, since all power transitions
90 * cause the cache state to get invalidated (although LP1 & LP2
91 * preserve the data in the L2, the control words (L2X0_CTRL,
92 * L2X0_AUX_CTRL, etc.) need to be cleaned to L3 so that they
93 * will be visible on reboot. skip this for LP0, since the L2 cache
94 * will be shutdown before we reach this point */
95 tst sp, #TEGRA_POWER_EFFECT_LP0
@ NOTE(review): the conditional branch consuming these flags (skip the L2
@ clean for LP0) is not visible in this view — confirm against the full file.
97 mov32 r0, (TEGRA_ARM_PL310_BASE-IO_CPU_PHYS+IO_CPU_VIRT)
@ r0 = PL310 L2 controller, virtual; r3 = one-past-end PA of the context area
98 add r3, r8, #(CONTEXT_SIZE_BYTES)
@ Labels 11:/12:/13: clean the context region line-by-line via
@ L2X0_CLEAN_LINE_PA, then issue and poll a cache sync. The loop-advance,
@ compare and branch instructions between these labels are elided here.
101 11: str r8, [r0, #L2X0_CLEAN_LINE_PA]
105 12: ldr r1, [r0, #L2X0_CLEAN_LINE_PA]
109 str r1, [r0, #L2X0_CACHE_SYNC]
110 13: ldr r1, [r0, #L2X0_CACHE_SYNC]
116 tst sp, #TEGRA_POWER_SDRAM_SELFREFRESH
118 /* preload all the address literals that are needed for the
119 * CPU power-gating process, to avoid loads from SDRAM (which are
120 * not supported once SDRAM is put into self-refresh.
121 * LP0 / LP1 use physical address, since the MMU needs to be
122 * disabled before putting SDRAM into self-refresh to avoid
123 * memory access due to page table walks */
@ Cache the IO bases in callee-role registers that stay live for the rest
@ of the teardown: r4=PMC, r5=CLK/RESET, r6=flow controller, r7=us timer.
@ NOTE(review): r0 holds a virt-phys offset and is written twice; the
@ conditional adds that apply it to r4-r7 for the self-refresh case are
@ elided in this view — confirm against the full file.
124 mov32 r0, (IO_APB_VIRT-IO_APB_PHYS)
125 mov32 r4, TEGRA_PMC_BASE
126 mov32 r0, (IO_PPSB_VIRT-IO_PPSB_PHYS)
127 mov32 r5, TEGRA_CLK_RESET_BASE
128 mov32 r6, TEGRA_FLOW_CTRL_BASE
129 mov32 r7, TEGRA_TMRUS_BASE
131 /* change page table pointer to tegra_pgd_phys, so that IRAM
132 * and MMU shut-off will be mapped virtual == physical */
133 adr r3, __tear_down_master_data
134 ldr r3, [r3] @ &tegra_pgd_phys
@ OR the cacheability/shareability attribute bits into the TTB value
136 orr r3, r3, #TTB_FLAGS
138 mcr p15, 0, r2, c13, c0, 1 @ reserved context
140 mcr p15, 0, r3, c2, c0, 0 @ TTB 0
143 /* Obtain LP1 information.
144 * R10 = LP1 branch target */
145 mov32 r2, __tegra_lp1_reset
146 mov32 r3, __tear_down_master_sdram
@ r3 is reused: IRAM code area where the LP1 routines were copied, then
@ the MMU shut-off trampoline. The arithmetic relocating the LP1 target
@ into IRAM (between these mov32s) is elided in this view.
148 mov32 r3, (TEGRA_IRAM_CODE_AREA)
151 mov32 r3, __shut_off_mmu
153 /* R9 = LP2 branch target */
154 mov32 r9, __tear_down_master_pll_cpu
156 /* Convert the branch targets
157 * to physical addresses */
@ virt->phys by subtracting the kernel linear-map offset; valid because
@ both symbols live in the kernel direct-mapped region
158 sub r3, r3, #(PAGE_OFFSET - PHYS_OFFSET)
159 sub r9, r9, #(PAGE_OFFSET - PHYS_OFFSET)
162 ENDPROC(__tear_down_master)
/* Literal pool read by __tear_down_master via adr/ldr: holds the address
 * of tegra_pgd_phys (the identity-mapped page directory used during MMU
 * shut-off). The .word payload itself is not visible in this view. */
163 .type __tear_down_master_data, %object
164 __tear_down_master_data:
166 .size __tear_down_master_data, . - __tear_down_master_data
168 /* START OF ROUTINES COPIED TO IRAM */
172 * reset vector for LP1 restore; copied into IRAM during suspend.
173 * brings the system back up to a safe starting point (SDRAM out of
174 * self-refresh, PLLC, PLLM and PLLP reenabled, CPU running on PLLP,
175 * system clock running on the same PLL that it suspended at), and
176 * jumps to tegra_lp2_startup to restore PLLX and virtual addressing.
177 * physical address of tegra_lp2_startup expected to be stored in
@ NOTE(review): ...in PMC_SCRATCH41, per the ldr at the end of this
@ routine; the rest of this header comment is elided in this view.
180 .align L1_CACHE_SHIFT
181 ENTRY(__tegra_lp1_reset)
183 /* the CPU and system bus are running at 32KHz and executing from
184 * IRAM when this code is executed; immediately switch to CLKM and
@ (comment continues off-view) ...then re-enable the PLLs. The mov/mov32
@ loading r1 with the CLKM burst value before these stores is elided.
186 mov32 r0, TEGRA_CLK_RESET_BASE
188 str r1, [r0, #CLK_RESET_SCLK_BURST]
189 str r1, [r0, #CLK_RESET_CCLK_BURST]
191 str r1, [r0, #CLK_RESET_SCLK_DIVIDER]
192 str r1, [r0, #CLK_RESET_CCLK_DIVIDER]
@ For each of PLLM/PLLP/PLLC: if currently disabled (flag test elided),
@ set bit 30 (ENABLE) and write it back.
194 ldr r1, [r0, #CLK_RESET_PLLM_BASE]
196 orreq r1, r1, #(1<<30)
197 streq r1, [r0, #CLK_RESET_PLLM_BASE]
198 ldr r1, [r0, #CLK_RESET_PLLP_BASE]
200 orreq r1, r1, #(1<<30)
201 streq r1, [r0, #CLK_RESET_PLLP_BASE]
202 ldr r1, [r0, #CLK_RESET_PLLC_BASE]
204 orreq r1, r1, #(1<<30)
205 streq r1, [r0, #CLK_RESET_PLLC_BASE]
206 mov32 r7, TEGRA_TMRUS_BASE @ r7 = microsecond timer, used by wait_until
209 /* since the optimized settings are still in SDRAM, there is
210 * no need to store them back into the IRAM-local __lp1_pad_area */
@ PC-relative address of the pad table (PC reads as .+8 in ARM state)
211 add r2, pc, #__lp1_pad_area-(.+8)
@ Walk the {register address, saved value} pairs and restore the DRAM
@ pad controls; the store/loop-branch after this ldmia is elided.
212 padload:ldmia r2!, {r3-r4}
219 add r2, r2, #0x4 @ 4uS delay for DRAM pad restoration
220 wait_until r2, r7, r3
221 add r1, r1, #0xff @ 255uS delay for PLL stabilization
222 wait_until r1, r7, r3
@ restore the saved sclk burst policy, and put the CPU back on PLLP
224 str r4, [r0, #CLK_RESET_SCLK_BURST]
225 mov32 r4, ((1<<28) | (4)) @ burst policy is PLLP
226 str r4, [r0, #CLK_RESET_CCLK_BURST]
228 mov32 r0, TEGRA_EMC_BASE
229 ldr r1, [r0, #EMC_CFG]
230 bic r1, r1, #(1<<31) @ disable DRAM_CLK_STOP
231 str r1, [r0, #EMC_CFG]
234 str r1, [r0, #EMC_SELF_REF] @ take DRAM out of self refresh
@ NOTE(review): the mov loading r1 before these EMC commands is elided
236 str r1, [r0, #EMC_NOP]
237 str r1, [r0, #EMC_NOP]
238 str r1, [r0, #EMC_REFRESH]
240 emc_device_mask r1, r0 @ r1 = self-refresh status bits for all devices
@ spin until every DRAM device has left self-refresh
242 exit_selfrefresh_loop:
243 ldr r2, [r0, #EMC_EMC_STATUS]
245 bne exit_selfrefresh_loop
@ un-stall incoming DRAM requests, then tail-jump to the resume entry
@ whose physical address was stashed in PMC_SCRATCH41 at suspend time
248 str r1, [r0, #EMC_REQ_CTRL]
250 mov32 r0, TEGRA_PMC_BASE
251 ldr r0, [r0, #PMC_SCRATCH41]
253 ENDPROC(__tegra_lp1_reset)
256 * __tear_down_master_sdram
258 * disables MMU, data cache, and puts SDRAM into self-refresh.
259 * must execute from IRAM.
@ Expects r5 = CLK/RESET base and r10 = LP1 resume target, as set up by
@ __tear_down_master; falls through to __tear_down_master_pll_cpu.
261 .align L1_CACHE_SHIFT
262 __tear_down_master_sdram:
263 mov32 r1, TEGRA_EMC_BASE
@ NOTE(review): the mov loading r2 with the stall value is elided here
265 str r2, [r1, #EMC_REQ_CTRL] @ stall incoming DRAM requests
@ poll EMC_EMC_STATUS until the controller is idle (compare/branch elided)
267 emcidle:ldr r2, [r1, #EMC_EMC_STATUS]
272 str r2, [r1, #EMC_SELF_REF] @ command: enter self-refresh
274 emc_device_mask r2, r1 @ r2 = per-device self-refresh status mask
276 emcself:ldr r3, [r1, #EMC_EMC_STATUS]
279 bne emcself @ loop until DDR in self-refresh
281 add r2, pc, #__lp1_pad_area-(.+8) @ PC-relative; code runs from IRAM
@ save the live DRAM pad-control registers into the IRAM-local table so
@ __tegra_lp1_reset can restore them; per-pair loop body is elided here
283 padsave:ldm r2, {r0-r1}
@ capture the current sclk burst policy (restored on LP1 exit); the store
@ of r0 into the sclk_burst_policy slot of __lp1_pad_area is elided
293 ldr r0, [r5, #CLK_RESET_SCLK_BURST]
296 b __tear_down_master_pll_cpu
297 ENDPROC(__tear_down_master_sdram)
/* Table of DRAM pad-control register addresses, interleaved (in the full
 * file) with slots for their saved values: written by padsave in
 * __tear_down_master_sdram, replayed by padload in __tegra_lp1_reset.
 * Terminated by a zero address; the final word holds the saved
 * sclk burst policy. Lives in IRAM alongside the LP1 code. */
299 .align L1_CACHE_SHIFT
300 .type __lp1_pad_area, %object
302 .word TEGRA_APB_MISC_BASE + 0x8c8 /* XM2CFGCPADCTRL */
304 .word TEGRA_APB_MISC_BASE + 0x8cc /* XM2CFGDPADCTRL */
306 .word TEGRA_APB_MISC_BASE + 0x8d0 /* XM2CLKCFGPADCTRL */
308 .word TEGRA_APB_MISC_BASE + 0x8d4 /* XM2COMPPADCTRL */
310 .word TEGRA_APB_MISC_BASE + 0x8d8 /* XM2VTTGENPADCTRL */
312 .word TEGRA_APB_MISC_BASE + 0x8e4 /* XM2CFGCPADCTRL2 */
314 .word TEGRA_APB_MISC_BASE + 0x8e8 /* XM2CFGDPADCTRL2 */
316 .word 0x0 /* end of list */
317 .word 0x0 /* sclk_burst_policy */
318 .size __lp1_pad_area, . - __lp1_pad_area
/* __tear_down_master_pll_cpu: final teardown stage. Programs the PMC
 * power-state request, reconfigures clock burst policies (PLLP for LP2
 * idle, 32KHz otherwise), disables the PLLs for LP0/LP1, arms the flow
 * controller to halt this CPU, and executes WFE so the PMC can power-gate
 * the core. Expects r4=PMC, r5=CLK/RESET, r6=flow ctrl, r7=us timer
 * (preloaded by __tear_down_master); sp still carries the power-state
 * flags. */
320 .align L1_CACHE_SHIFT
321 __tear_down_master_pll_cpu:
@ insert the power-state bits from sp into PMC_CTRL[16:8]
322 ldr r0, [r4, #PMC_CTRL]
323 bfi r0, sp, #PMC_CTRL_BFI_SHIFT, #PMC_CTRL_BFI_WIDTH
324 str r0, [r4, #PMC_CTRL]
325 tst sp, #TEGRA_POWER_SDRAM_SELFREFRESH
327 /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */
328 moveq r0, #(2<<28) /* burst policy = run mode */
329 orreq r0, r0, #(4<<4) /* use PLLP in run mode burst */
330 streq r0, [r5, #CLK_RESET_CCLK_BURST]
@ NOTE(review): the moveq loading the divider value, and the branch that
@ skips the non-LP2 path below, are elided in this view
332 streq r0, [r5, #CLK_RESET_CCLK_DIVIDER]
335 /* in other modes, set system & CPU burst policies to 32KHz.
336 * start by jumping to CLKM to safely disable PLLs, then jump
@ (comment continues off-view; the mov loading r0 with the CLKM burst
@ value before these stores is elided)
339 str r0, [r5, #CLK_RESET_SCLK_BURST]
340 str r0, [r5, #CLK_RESET_CCLK_BURST]
342 str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
343 str r0, [r5, #CLK_RESET_SCLK_DIVIDER]
345 /* 2 us delay between changing sclk and disabling PLLs */
346 wait_for_us r1, r7, r9
348 wait_until r1, r7, r9
351 mov r0, #0 /* burst policy = 32KHz */
352 str r0, [r5, #CLK_RESET_SCLK_BURST]
354 /* disable PLLP, PLLM, PLLC in LP0 and LP1 states */
@ read-modify-write each PLL base register; the bic clearing the ENABLE
@ bit (bit 30) between each ldr/str pair is elided in this view
355 ldr r0, [r5, #CLK_RESET_PLLM_BASE]
357 str r0, [r5, #CLK_RESET_PLLM_BASE]
358 ldr r0, [r5, #CLK_RESET_PLLP_BASE]
360 str r0, [r5, #CLK_RESET_PLLP_BASE]
361 ldr r0, [r5, #CLK_RESET_PLLC_BASE]
363 str r0, [r5, #CLK_RESET_PLLC_BASE]
@ arm the flow controller: halt this CPU until an interrupt, waking on
@ IRQ/FIQ of CPU0
366 mov r0, #(4<<29) /* STOP_UNTIL_IRQ */
367 orr r0, r0, #(1<<10) | (1<<8) /* IRQ_0, FIQ_0 */
@ NOTE(review): the value stashed in PMC_SCRATCH38 here is loaded by an
@ elided instruction — presumably debug/resume bookkeeping; confirm
369 str r1, [r4, #PMC_SCRATCH38]
371 str r0, [r6, #FLOW_CTRL_HALT_CPU_EVENTS]
373 ldr r0, [r6, #FLOW_CTRL_HALT_CPU_EVENTS] /* memory barrier */
376 wfe /* CPU should be power gated here */
379 ENDPROC(__tear_down_master_pll_cpu)
382 * __put_cpu_in_reset(cpu_nr)
384 * puts the specified CPU in wait-for-event mode on the flow controller
385 * and puts the CPU in reset
@ r0 = cpu_nr on entry (per the signature above); the compare selecting
@ the per-CPU halt-events offset, and the value loaded into r2, are
@ elided in this view.
387 ENTRY(__put_cpu_in_reset)
393 moveq r1, #0 @ r1 = CPUx_HALT_EVENTS register offset
394 mov32 r7, (TEGRA_FLOW_CTRL_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
396 str r2, [r7, r1] @ put flow controller in wait event mode
@ write the reset-set register (0x340) in the clock/reset controller;
@ the shift building the per-CPU reset bit in r1 is elided here
401 mov32 r7, (TEGRA_CLK_RESET_BASE-IO_PPSB_PHYS+IO_PPSB_VIRT)
402 str r1, [r7, #0x340] @ put slave CPU in reset
406 ENDPROC(__put_cpu_in_reset)
408 /* dummy symbol for end of IRAM */
@ Marks the end of the code/data region copied into IRAM at suspend time;
@ the copy size is (in the full file) computed as
@ __tegra_iram_end - __tegra_lp1_reset.
409 .align L1_CACHE_SHIFT
410 ENTRY(__tegra_iram_end)
413 ENDPROC(__tegra_iram_end)