/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG
#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE
		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif
		.macro	debug_reloc_start
#ifdef DEBUG
		kphex	r6, 8		/* processor id */
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kphex	r5, 8		/* decompressed kernel start */
		kphex	r9, 8		/* decompressed kernel end */
		kphex	r4, 8		/* kernel execution address */
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag
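		/*
		 * A sketch of how a boot loader can use the endianness
		 * flag: the word is emitted as 0x04030201 in the image's
		 * own byte order, so a same-endian loader reads back
		 * 0x04030201 while an opposite-endian one reads
		 * 0x01020304, revealing the image's byte order.
		 */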
 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */
#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite ourselves.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most infrequent, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
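		/*
		 * Worked example of the load above (a sketch with example
		 * bytes): if the four bytes at r10 are 0x00 0x40 0x5a 0x00
		 * (least significant first), the orrs assemble
		 * r9 = 0x005a4000, i.e. a 5,914,624-byte image.  Because
		 * ldrb reads one byte at a time, the result is correct
		 * regardless of the CPU's own endianness.
		 */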
#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 *   r4  = final kernel address (possibly with LSB set)
 *   r5  = appended dtb size (still unknown)
 *   r6  = _edata
 *   r7  = architecture ID
 *   r8  = atags/device tree pointer
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here. No GOT fixup has occurred
		 * yet, but none of the code we're about to call uses any
		 * global variable.
		 */

		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
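		/*
		 * Worked example of the four-instruction byte swap above,
		 * with an arbitrary value r5 = 0xAABBCCDD as a sketch:
		 *   r5 ror #16           = 0xCCDDAABB
		 *   r1 = r5 ^ 0xCCDDAABB = 0x66666666
		 *   bic r1, #0x00ff0000  = 0x66006666
		 *   r5 ror #8            = 0xDDAABBCC
		 *   r5 ^ (r1 lsr #8)     = 0xDDAABBCC ^ 0x00660066
		 *                        = 0xDDCCBBAA
		 * i.e. the byte-reversed value, using only one scratch
		 * register.
		 */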
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
		/* clamp to 32KB min and 1MB max */
		cmp	r5, #(1 << 15)
		movlo	r5, #(1 << 15)
		cmp	r5, #(1 << 20)
		movhi	r5, #(1 << 20)
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5

		stmfd	sp!, {r0-r3, ip, lr}
		/*
		 * If the returned value is 1, there is no ATAG at the
		 * location pointed to by r8.  Try the typical 0x100 offset
		 * from start of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
#endif

		mov	r8, r6			@ use the appended device tree
		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area. To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1
		/* Get the current DTB size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif
/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31
		/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
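		/*
		 * A note on the arithmetic above: each ldmdb/stmdb pair
		 * moves eight registers, i.e. 8 * 4 = 32 bytes per
		 * iteration, which is exactly why the size in r9 was
		 * rounded up to a multiple of 32 beforehand.
		 */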
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6
#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif
		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated
#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b
		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif
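		/*
		 * Sketch of what one GOT fixup does, with hypothetical
		 * addresses: if the image was linked at 0x00008000 but
		 * runs at 0x30008000, the delta in r0 is 0x30000000, so
		 * a GOT entry holding 0x00008100 is rewritten to
		 * 0x30008100 and C code loading addresses through the
		 * GOT sees run-time addresses.
		 */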
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		cmp	r2, r3
		blo	1b

		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on
/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif
		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0
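		/*
		 * On the last LC0 word (a sketch of its meaning):
		 * _end - restart is this image's own footprint, 16384
		 * accounts for the 16k page directory placed below the
		 * kernel, and 1024*1024 is the rough 1MB of room reserved
		 * for a possible appended DTB, matching the page-table
		 * comment near the top of this file.
		 */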
#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop - eventually.
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn
/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr
__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3

		mov	pc, lr
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
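		/*
		 * Sketch of one descriptor the loop produces, assuming
		 * CB_BITS = 0x0c (write-back) and r6 = CB_BITS | 0x12 as
		 * set on the v4 path: a hypothetical RAM megabyte at
		 * 0x30000000 becomes the section entry 0x30000c1e
		 * (AP=11, C=1, B=1, XN|U + section bits), while a non-RAM
		 * megabyte becomes base | 0xc12 with C and B clear,
		 * giving a flat 1:1 map.
		 */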
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on
__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12
__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
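		/*
		 * On the sub above (a sketch of the trick): "r0, lsr #32"
		 * always evaluates to 0 in ARM shift semantics, so the
		 * instruction is effectively "mov pc, lr"; making the
		 * return address depend on the value just read back from
		 * the control register forces the MMU-enable write to
		 * take effect before execution continues.
		 */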
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 */
call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b
/*
 * Table for cache operations.  This is basically:
 *	- CPU ID match
 *	- CPU ID mask
 *	- 'cache on' method instruction
 *	- 'cache off' method instruction
 *	- 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
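/*
 * Worked example of the match rule (a sketch): the ARM926EJ-S entry
 * below has match = 0x41069260 and mask = 0xff0ffff0.  A CPU ID of,
 * say, 0x41069264 gives (0x41069264 ^ 0x41069260) = 0x4, and
 * 0x4 & 0xff0ffff0 = 0, so the entry matches regardless of the
 * revision bits the mask leaves out.
 */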
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush
		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif
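		/*
		 * Why W(b) matters here (a sketch): in a Thumb-2 kernel a
		 * plain "b" may assemble to a 16-bit branch, making that
		 * table entry shorter than PROC_ENTRY_SIZE (4*5 bytes) and
		 * skewing the offsets call_cache_fn jumps to; W() forces
		 * the 32-bit wide encoding so every slot stays exactly one
		 * word.
		 */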
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn
__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr
__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr
__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr
__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv6_mmu_cache_flush:
		mov	r1, #0
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv7_mmu_cache_flush:
		mov	r12, lr
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr & csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
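		/*
		 * Sketch of the arithmetic: the csidr LineSize field is
		 * log2(words per line) - 2, so 32-byte (8-word) lines read
		 * as 1, and 1 + 4 = 5 = log2(32) is the bit position at
		 * which the set index is inserted into the set/way
		 * operand below.
		 */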
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number (ways - 1)
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract maximum set number (sets - 1)
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, r12
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b
@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr

@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif
#ifdef CONFIG_ARM_VIRT_EXT
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */
__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4		)	@ call kernel
 THUMB(		bx	r4		)	@ entry point is always ARM
reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: