/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE
#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
		.macro	debug_reloc_start
#ifdef DEBUG
		kphex	r6, 8		/* processor id */
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kphex	r5, 8		/* decompressed kernel start */
		kphex	r9, 8		/* decompressed kernel end */
		kphex	r4, 8		/* kernel execution address */
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
 THUMB(		adr	r12, BSYM(1f)	)
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */
#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourself.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
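
		/*
		 * Worked example (illustrative numbers, not from any real
		 * build): if the four appended size bytes are 00 00 40 00,
		 * the assembly above yields r9 = 0x00 | 0x00 << 8 |
		 * 0x40 << 16 | 0x00 << 24 = 0x00400000, i.e. a 4 MiB
		 * decompressed image.
		 */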
#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */
		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif
		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1
		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
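
		/*
		 * The eor/bic/ror/eor sequence above is the classic
		 * pre-ARMv6 byte swap (v6 and later would use "rev"):
		 * for r5 = 0xAABBCCDD it yields 0xDDCCBBAA.  The first
		 * eor plus the bic isolate the byte differences that
		 * survive a halfword rotation, and the final ror + eor
		 * folds them back in with the byte order reversed.
		 */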
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5

dtb_check_done:
#endif
/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
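/*
 * Worked example with made-up numbers: for r4 = 0x80008000 and
 * r9 = 0x00800000 (an 8 MiB image), we need either
 * 0x80008000 >= (end of this image + 16k), or
 * 0x80008000 + 0x00800000 <= &wont_overwrite; if neither holds,
 * the code below relocates us out of the way first.
 */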
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255
		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31
		/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif
		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		tst	r4, #1
		bleq	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 */
		teq	r0, #0
		beq	not_relocated
		add	r11, r11, r0
		add	r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (CONFIG_ZBOOT_ROM not set),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b
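
		/*
		 * Equivalent C sketch of the loop above (illustrative
		 * only; the names are ours, not the kernel's):
		 *
		 *	for (u32 *p = got_start; p < got_end; p++) {
		 *		*p += delta;
		 *		if (*p >= bss_start && *p < bss_end)
		 *			*p += dtb_size;
		 *	}
		 */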
		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b

#endif
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on
/*
 * The C runtime environment should now be sufficiently set up.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif
		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0
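
		/*
		 * The final word above is the headroom used by the early
		 * page-table decision (the LC0+32 load near the top of
		 * the file): whole image size plus 16k for the page
		 * directory plus 1MB of slack for a possible appended DTB.
		 */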
#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
/*
 * Initialize the highest priority protection region, PR7
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr
__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
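
/*
 * For reference, assuming the default CB_BITS = 0x0c (write-back):
 * a RAM section entry built above comes out as
 * (base & 0xfff00000) | 0xc1e, i.e. AP=11, C=1, B=1, section type
 * 0b10; a non-RAM entry comes out as (base | 0xc12), the same but
 * with XN|U set and C/B clear.
 */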
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0]
		add	r0, r0, #4
		str	r1, [r0]
		mov	pc, lr
		@ Enable unaligned access on v6, to allow better code generation
		@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12
__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
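
/*
 * Note on the return above: an immediate shift of "lsr #32" is a
 * legal ARM encoding that yields 0, so the instruction is simply
 * "mov pc, lr" with an artificial data dependency on the freshly
 * read control register, ensuring instructions prefetched before
 * the MMU switch are discarded.
 */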
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 */
call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3	)	@ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		)	@ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b
/*
 * Table for cache operations.  This is basically:
 *	- CPU ID match
 *	- CPU ID mask
 *	- 'cache on' method instruction
 *	- 'cache off' method instruction
 *	- 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
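
/*
 * Matching example (illustrative): an ARM920T reports MIDR
 * 0x41129200.  It misses all the specific-ID entries below and
 * then hits the ARMv4T architecture entry, since
 * (0x41129200 ^ 0x00020000) & 0x000f0000 == 0.
 */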
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID

		.word	0x41007000		@ ARM7/710

		.word	0x41807200		@ ARM720T (writethrough)
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off

		.word	0x41007400		@ ARM74x
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type

		.size	proc_types, . - proc_types
/*
 * If you get a "non-constant expression in ".if" statement"
 * error from the assembler on this line, check that you have
 * not accidentally written a "b" instruction where you should
 * have written W(b).
 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif
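
/*
 * Rationale: under CONFIG_THUMB2_KERNEL a bare "b" may assemble
 * to a 16-bit encoding, which would break the fixed 20-byte
 * (PROC_ENTRY_SIZE) stride that call_cache_fn steps through;
 * W(b) forces the wide 32-bit encoding.
 */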
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn
__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn
__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number of the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
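
/*
 * The set/way operand built in loop3 above packs, per the ARMv7
 * architecture: the cache level minus 1 in bits [3:1] (from r10),
 * the way index at the top of the word (shifted by 32 - log2(ways),
 * via the clz result in r5), and the set index starting at the
 * log2(line size) bit position held in r2.
 */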
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
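
/*
 * Using r15 (pc) as the destination of the "test, clean and
 * invalidate" operation above makes it set the condition flags
 * rather than write a register; the loop spins while the D-cache
 * still holds dirty lines.
 */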
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:	mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
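
/*
 * The ldr loop above flushes the D-cache without dedicated cache
 * ops: reading a region of twice the cache size in line-size
 * steps forces every existing line, including dirty ones, to be
 * evicted and written back before the I/D flushes that follow.
 */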
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf
@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf

@ puts corrupts {r0, r1, r2, r3}
1:		ldrb	r2, [r0], #1

@ putc corrupts {r0, r1, r2, r3}

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0

2:		mov	r0, r11, lsl #2

		ldr	r0, [r12, r11, lsl #2]
#ifdef CONFIG_ARM_VIRT_EXT
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: