/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
        .macro  arch_ret_to_user, tmp1, tmp2
        .endm
#endif

#include "entry-header.S"
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * frame.
 */
ret_fast_syscall:
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 1, offset = S_OFF
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
work_pending:
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_work_pending
        cmp     r0, #0
        beq     no_work_pending
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq                             @ disable interrupts
ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
        adr     lr, BSYM(1f)                    @ kernel threads should not exit
ENDPROC(ret_from_kernel_thread)
/*
 * turn a kernel thread into a userland process
 * use: ret_from_kernel_execve(struct pt_regs *normal)
 */
ENTRY(ret_from_kernel_execve)
        mov     why, #0                         @ not a syscall
        str     why, [r0, #S_R0]                @ ... and we want 0 in ->ARM_r0 as well
        get_thread_info tsk                     @ thread structure
        mov     sp, r0                          @ stack pointer just under pt_regs
        b       ret_slow_syscall
ENDPROC(ret_from_kernel_execve)
        .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call with a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */
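/*
 * As a rough illustration of the dynamic ftrace case (the actual
 * instruction rewriting lives in arch/arm/kernel/ftrace.c, in
 * ftrace_make_call()/ftrace_make_nop()), a __gnu_mcount_nc call site
 * toggles between a traced and an untraced form along these lines:
 *
 *	tracing enabled:		tracing disabled:
 *	push	{lr}			push	{lr}
 *	bl	__gnu_mcount_nc		pop	{lr}
 *
 * i.e. when tracing is off the branch is overwritten with an instruction
 * that simply undoes the push, so an untraced call site costs only two
 * trivial instructions.
 */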
#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif
.macro mcount_adjust_addr rd, rn
        bic     \rd, \rn, #1            @ clear the Thumb bit if present
        sub     \rd, \rd, #MCOUNT_INSN_SIZE
.endm
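/*
 * Worked example (addresses made up for illustration): if the
 * instrumented function's "bl __gnu_mcount_nc" sits at 0xc0101002 in
 * Thumb-2 code, then on entry to the tracer lr holds 0xc0101007, the
 * address of the next instruction with bit 0 set to flag Thumb state.
 * mcount_adjust_addr clears bit 0 (giving 0xc0101006) and subtracts
 * MCOUNT_INSN_SIZE (the 4-byte call), yielding 0xc0101002: the call
 * site itself, right at the start of the instrumented function, which
 * is what gets recorded as the "instrumented function" address.
 */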
.macro __mcount suffix
        ldr     r0, =ftrace_trace_function
        adr     r0, .Lftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        ldr     r1, =ftrace_graph_return
        bne     ftrace_graph_caller\suffix

        ldr     r1, =ftrace_graph_entry
        ldr     r0, =ftrace_graph_entry_stub
        bne     ftrace_graph_caller\suffix
#endif

        mcount_exit

1:      mcount_get_lr   r1                      @ lr of instrumented func
        mcount_adjust_addr      r0, lr          @ instrumented function
.macro __ftrace_caller suffix
        mcount_get_lr   r1                      @ lr of instrumented func
        mcount_adjust_addr      r0, lr          @ instrumented function

        .globl ftrace_call\suffix
ftrace_call\suffix:
        bl      ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
        mov     r0, r0
#endif
.macro __ftrace_graph_caller
        sub     r0, fp, #4              @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
        @ called from __ftrace_caller, saved in mcount_enter
        ldr     r1, [sp, #16]           @ instrumented routine (func)
        mcount_adjust_addr      r1, r1
#else
        @ called from __mcount, untouched in lr
        mcount_adjust_addr      r1, lr  @ instrumented routine (func)
#endif
        mov     r2, fp                  @ frame pointer
        bl      prepare_ftrace_return
        mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
        stmdb   sp!, {r0-r3, lr}

.macro mcount_get_lr reg
        ldr     \reg, [fp, #-4]
.endm

        ldmia   sp!, {r0-r3, pc}
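/*
 * For the old ABI variant the instrumented function's lr has to be dug
 * out of the APCS frame set up by its prologue ("mov ip, sp",
 * "push {fp, ip, lr, pc}", "sub fp, ip, #4", as described further up):
 * fp ends up pointing at the saved pc slot, so the saved lr (the
 * caller's address) lives at [fp, #-4].  That is the slot
 * mcount_get_lr fetches here, and __ftrace_graph_caller relies on the
 * same layout when it takes "fp - 4" as &lr of the instrumented routine.
 */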
#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
        __ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_get_lr
        stmdb   sp!, {r0-r3, lr}

.macro mcount_get_lr reg
        ldr     \reg, [sp, #20]
.endm

        ldmia   sp!, {r0-r3, ip, lr}

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
        __ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_get_lr
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl return_to_handler
return_to_handler:
        stmdb   sp!, {r0-r3}
        mov     r0, fp                  @ frame pointer
        bl      ftrace_return_to_handler
        mov     lr, r0                  @ r0 has real ret addr
        ldmia   sp!, {r0-r3}
        mov     pc, lr
#endif

#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
 ARM(   add     r8, sp, #S_PC           )
 ARM(   stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
 THUMB( mov     r8, sp                  )
 THUMB( store_user_sp_lr r8, r10, S_SP  )       @ calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        /*
         * Get the system call number.
         */
#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call.
         */
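        /*
         * For reference, user space invokes a syscall roughly like this
         * (illustrative fragments, not lifted from any particular libc):
         *
         *	EABI:	mov	r7, #<nr>
         *		swi	#0
         *
         *	OABI:	swi	#(__NR_SYSCALL_BASE + <nr>)
         *
         * EABI passes the syscall number in r7 and always uses "swi 0";
         * the old ABI encodes the number in the swi immediate itself.
         * That is why, when both ABIs are supported, the swi instruction
         * word has to be fetched from [lr, #-4] and inspected below.
         */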
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
        ldreq   r10, [lr, #-4]                  @ get SWI instruction
#else
        ldr     r10, [lr, #-4]                  @ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
        rev     r10, r10                        @ little endian instruction
#endif
#elif defined(CONFIG_AEABI)

        /*
         * Pure EABI user space always puts the syscall number into scno (r7).
         */
#elif defined(CONFIG_ARM_THUMB)
        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]

#else
        /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
#endif
#ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
#endif
        enable_irq

        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
        ldrne   tbl, =sys_oabi_call_table
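        /*
         * Worked example (purely illustrative): an old-ABI "swi #0x900005"
         * (old-ABI sys_open) assembles to the word 0xef900005.  The bics
         * above clears the top byte, leaving 0x00900005, which is non-zero,
         * so the two conditional instructions execute: the eor strips
         * __NR_OABI_SYSCALL_BASE (0x900000), leaving scno = 5, and tbl is
         * switched to sys_oabi_call_table.  An EABI "swi #0" leaves zero
         * after the bics, so both conditional instructions are skipped and
         * the EABI table loaded earlier is used unchanged.
         */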
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif
local_restart:
        ldr     r10, [tsk, #TI_FLAGS]           @ check for syscall tracing
        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args

#ifdef CONFIG_SECCOMP
        tst     r10, #_TIF_SECCOMP
        beq     1f
        mov     r0, scno
        bl      __secure_computing
        add     r0, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmia   r0, {r0 - r3}                   @ have to reload r0 - r3
1:
#endif

        tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        adr     lr, BSYM(ret_fast_syscall)      @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        b       sys_ni_syscall                  @ not private func
ENDPROC(vector_swi)
        /*
         * This is the really slow path.  We're going to be doing
         * context switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     r1, scno
        add     r0, sp, #S_OFF
        bl      syscall_trace_enter

        adr     lr, BSYM(__sys_trace_return)    @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r6}                   @ have to reload r0 - r6
        stmccia sp, {r4, r5}                    @ and update the stack args
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        b       2b

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r0, sp
        bl      syscall_trace_exit
        b       ret_slow_syscall
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif
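/*
 * To illustrate how the macros combine (hypothetical entry, not an actual
 * line from calls.S): an entry written as
 *
 *	CALL(OBSOLETE(sys_something))
 *
 * expands to ".long sys_something" in this table on a legacy-ABI kernel
 * (no CONFIG_AEABI), but to ".long sys_ni_syscall" when CONFIG_AEABI is
 * set, so obsolete old-ABI entries simply return -ENOSYS on an EABI
 * kernel.  The old-ABI compat table declared further down redefines
 * OBSOLETE so old binaries still reach the real call, and the first CALL
 * definition above counts the entries into NR_syscalls before the table
 * itself is emitted.
 */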
        .type   sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
                bic     scno, r0, #__NR_OABI_SYSCALL_BASE
                cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
                cmpne   scno, #NR_syscalls      @ check range
                stmloia sp, {r5, r6}            @ shuffle args
                ldrlo   pc, [tbl, scno, lsl #2]
                b       sys_ni_syscall
ENDPROC(sys_syscall)
ENDPROC(sys_fork_wrapper)

ENDPROC(sys_vfork_wrapper)

ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
                add     r0, sp, #S_OFF
                mov     why, #0         @ prevent syscall restart handling
                b       sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
                add     r0, sp, #S_OFF
                mov     why, #0         @ prevent syscall restart handling
                b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
                ldr     r2, [sp, #S_OFF + S_SP]
                b       do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
                teq     r1, #88
                moveq   r1, #84
                b       sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
                teq     r1, #88
                moveq   r1, #84
                b       sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
                tst     r5, #PGOFF_MASK
                moveq   r5, r5, lsr #PAGE_SHIFT - 12
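/*
 * Worked example (illustrative): with 16K pages (PAGE_SHIFT = 14), a
 * request of off_4k = 8 (i.e. a 32K file offset) has its low bits checked
 * by the tst above (the bits that the shift would throw away); 8 is a
 * multiple of 4, so it passes, and shifting right by PAGE_SHIFT - 12 = 2
 * hands a page offset of 2 to sys_mmap_pgoff.  A request of off_4k = 9
 * (36K) cannot be expressed in whole 16K pages, so the test fails and the
 * wrapper returns -EINVAL instead.
 */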
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
ENDPROC(sys_oabi_pread64)

ENDPROC(sys_oabi_pwrite64)

ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
ENDPROC(sys_oabi_ftruncate64)

ENDPROC(sys_oabi_readahead)
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

        .type   sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"

#endif