s390: remove "64" suffix from a couple of files
author    Heiko Carstens <heiko.carstens@de.ibm.com>
Fri, 13 Feb 2015 12:04:39 +0000 (13:04 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Wed, 25 Mar 2015 10:49:34 +0000 (11:49 +0100)
Rename a couple of files to get rid of the "64" suffix.
"git blame" will still work.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
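
As a quick sanity check after the rename, per-file history stays reachable; for example (illustrative git commands, run in a kernel tree that contains this commit):

    git blame arch/s390/kernel/entry.S              # blame follows whole-file renames automatically
    git log --follow -- arch/s390/kernel/entry.S    # log needs --follow to cross the rename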
arch/s390/boot/compressed/Makefile
arch/s390/boot/compressed/head.S [new file with mode: 0644]
arch/s390/boot/compressed/head64.S [deleted file]
arch/s390/kernel/Makefile
arch/s390/kernel/entry.S [new file with mode: 0644]
arch/s390/kernel/entry64.S [deleted file]
arch/s390/kernel/reipl.S [new file with mode: 0644]
arch/s390/kernel/reipl64.S [deleted file]
arch/s390/kernel/relocate_kernel.S [new file with mode: 0644]
arch/s390/kernel/relocate_kernel64.S [deleted file]

diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 254fb05c5d6c833ea248bf55c370f0076fa1c444..d4788111c16171135422a0ef29e23e2eb866236d 100644
@@ -6,7 +6,7 @@
 
 targets        := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += misc.o piggy.o sizes.h head64.o
+targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -17,7 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 GCOV_PROFILE := n
 
 OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o)
-OBJECTS += $(obj)/head64.o $(obj)/misc.o $(obj)/piggy.o
+OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
 
 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
@@ -32,8 +32,8 @@ quiet_cmd_sizes = GEN $@
 $(obj)/sizes.h: vmlinux
        $(call if_changed,sizes)
 
-AFLAGS_head64.o += -I$(obj)
-$(obj)/head64.o: $(obj)/sizes.h
+AFLAGS_head.o += -I$(obj)
+$(obj)/head.o: $(obj)/sizes.h
 
 CFLAGS_misc.o += -I$(obj)
 $(obj)/misc.o: $(obj)/sizes.h
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
new file mode 100644
index 0000000..f86a4ee
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Startup glue code to uncompress the kernel
+ *
+ * Copyright IBM Corp. 2010
+ *
+ *   Author(s):        Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include "sizes.h"
+
+__HEAD
+ENTRY(startup_continue)
+       basr    %r13,0                  # get base
+.LPG1:
+       # setup stack
+       lg      %r15,.Lstack-.LPG1(%r13)
+       aghi    %r15,-160
+       brasl   %r14,decompress_kernel
+       # setup registers for memory mover & branch to target
+       lgr     %r4,%r2
+       lg      %r2,.Loffset-.LPG1(%r13)
+       la      %r4,0(%r2,%r4)
+       lg      %r3,.Lmvsize-.LPG1(%r13)
+       lgr     %r5,%r3
+       # move the memory mover someplace safe
+       la      %r1,0x200
+       mvc     0(mover_end-mover,%r1),mover-.LPG1(%r13)
+       # decompress image is started at 0x11000
+       lgr     %r6,%r2
+       br      %r1
+mover:
+       mvcle   %r2,%r4,0
+       jo      mover
+       br      %r6
+mover_end:
+
+       .align  8
+.Lstack:
+       .quad   0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+.Loffset:
+       .quad   0x11000
+.Lmvsize:
+       .quad   SZ__bss_start
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
deleted file mode 100644
index f86a4ee..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- *   Author(s):        Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_continue)
-       basr    %r13,0                  # get base
-.LPG1:
-       # setup stack
-       lg      %r15,.Lstack-.LPG1(%r13)
-       aghi    %r15,-160
-       brasl   %r14,decompress_kernel
-       # setup registers for memory mover & branch to target
-       lgr     %r4,%r2
-       lg      %r2,.Loffset-.LPG1(%r13)
-       la      %r4,0(%r2,%r4)
-       lg      %r3,.Lmvsize-.LPG1(%r13)
-       lgr     %r5,%r3
-       # move the memory mover someplace safe
-       la      %r1,0x200
-       mvc     0(mover_end-mover,%r1),mover-.LPG1(%r13)
-       # decompress image is started at 0x11000
-       lgr     %r6,%r2
-       br      %r1
-mover:
-       mvcle   %r2,%r4,0
-       jo      mover
-       br      %r6
-mover_end:
-
-       .align  8
-.Lstack:
-       .quad   0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
-.Loffset:
-       .quad   0x11000
-.Lmvsize:
-       .quad   SZ__bss_start
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index d94cbba95c508fc6dfc5a44fbb92fc1528d335e7..a6d0b9dd05e6e0a668c949af1e33c485c1e8d4c6 100644
@@ -33,7 +33,7 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y  += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y  += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y  += runtime_instr.o cache.o dumpstack.o
-obj-y  += entry64.o reipl64.o relocate_kernel64.o
+obj-y  += entry.o reipl.o relocate_kernel.o
 
 extra-y                                += head.o head64.o vmlinux.lds
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
new file mode 100644
index 0000000..c329446
--- /dev/null
@@ -0,0 +1,1059 @@
+/*
+ *    S390 low-level entry points.
+ *
+ *    Copyright IBM Corp. 1999, 2012
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *              Hartmut Penner (hp@de.ibm.com),
+ *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
+ *              Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include <asm/ptrace.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+#include <asm/sigp.h>
+#include <asm/irq.h>
+
+__PT_R0      = __PT_GPRS
+__PT_R1      = __PT_GPRS + 8
+__PT_R2      = __PT_GPRS + 16
+__PT_R3      = __PT_GPRS + 24
+__PT_R4      = __PT_GPRS + 32
+__PT_R5      = __PT_GPRS + 40
+__PT_R6      = __PT_GPRS + 48
+__PT_R7      = __PT_GPRS + 56
+__PT_R8      = __PT_GPRS + 64
+__PT_R9      = __PT_GPRS + 72
+__PT_R10     = __PT_GPRS + 80
+__PT_R11     = __PT_GPRS + 88
+__PT_R12     = __PT_GPRS + 96
+__PT_R13     = __PT_GPRS + 104
+__PT_R14     = __PT_GPRS + 112
+__PT_R15     = __PT_GPRS + 120
+
+STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SIZE  = 1 << STACK_SHIFT
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
+
+_TIF_WORK      = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+                  _TIF_UPROBE)
+_TIF_TRACE     = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
+                  _TIF_SYSCALL_TRACEPOINT)
+_CIF_WORK      = (_CIF_MCCK_PENDING | _CIF_ASCE)
+_PIF_WORK      = (_PIF_PER_TRAP)
+
+#define BASED(name) name-system_call(%r13)
+
+       .macro  TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
+       basr    %r2,%r0
+       brasl   %r14,trace_hardirqs_on_caller
+#endif
+       .endm
+
+       .macro  TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
+       basr    %r2,%r0
+       brasl   %r14,trace_hardirqs_off_caller
+#endif
+       .endm
+
+       .macro  LOCKDEP_SYS_EXIT
+#ifdef CONFIG_LOCKDEP
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jz      .+10
+       brasl   %r14,lockdep_sys_exit
+#endif
+       .endm
+
+       .macro LPP newpp
+#if IS_ENABLED(CONFIG_KVM)
+       tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_LPP
+       jz      .+8
+       .insn   s,0xb2800000,\newpp
+#endif
+       .endm
+
+       .macro  HANDLE_SIE_INTERCEPT scratch,reason
+#if IS_ENABLED(CONFIG_KVM)
+       tmhh    %r8,0x0001              # interrupting from user ?
+       jnz     .+62
+       lgr     \scratch,%r9
+       slg     \scratch,BASED(.Lsie_critical)
+       clg     \scratch,BASED(.Lsie_critical_length)
+       .if     \reason==1
+       # Some program interrupts are suppressing (e.g. protection).
+       # We must also check the instruction after SIE in that case.
+       # do_protection_exception will rewind to .Lrewind_pad
+       jh      .+42
+       .else
+       jhe     .+42
+       .endif
+       lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
+       LPP     __SF_EMPTY+16(%r15)             # set host id
+       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+       larl    %r9,sie_exit                    # skip forward to sie_exit
+       mvi     __SF_EMPTY+31(%r15),\reason     # set exit reason
+#endif
+       .endm
+
+       .macro  CHECK_STACK stacksize,savearea
+#ifdef CONFIG_CHECK_STACK
+       tml     %r15,\stacksize - CONFIG_STACK_GUARD
+       lghi    %r14,\savearea
+       jz      stack_overflow
+#endif
+       .endm
+
+       .macro  SWITCH_ASYNC savearea,stack,shift
+       tmhh    %r8,0x0001              # interrupting from user ?
+       jnz     1f
+       lgr     %r14,%r9
+       slg     %r14,BASED(.Lcritical_start)
+       clg     %r14,BASED(.Lcritical_length)
+       jhe     0f
+       lghi    %r11,\savearea          # inside critical section, do cleanup
+       brasl   %r14,cleanup_critical
+       tmhh    %r8,0x0001              # retest problem state after cleanup
+       jnz     1f
+0:     lg      %r14,\stack             # are we already on the target stack?
+       slgr    %r14,%r15
+       srag    %r14,%r14,\shift
+       jnz     1f
+       CHECK_STACK 1<<\shift,\savearea
+       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       j       2f
+1:     lg      %r15,\stack             # load target stack
+2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       .endm
+
+       .macro UPDATE_VTIME scratch,enter_timer
+       lg      \scratch,__LC_EXIT_TIMER
+       slg     \scratch,\enter_timer
+       alg     \scratch,__LC_USER_TIMER
+       stg     \scratch,__LC_USER_TIMER
+       lg      \scratch,__LC_LAST_UPDATE_TIMER
+       slg     \scratch,__LC_EXIT_TIMER
+       alg     \scratch,__LC_SYSTEM_TIMER
+       stg     \scratch,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
+       .endm
+
+       .macro  LAST_BREAK scratch
+       srag    \scratch,%r10,23
+       jz      .+10
+       stg     %r10,__TI_last_break(%r12)
+       .endm
+
+       .macro REENABLE_IRQS
+       stg     %r8,__LC_RETURN_PSW
+       ni      __LC_RETURN_PSW,0xbf
+       ssm     __LC_RETURN_PSW
+       .endm
+
+       .macro STCK savearea
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+       .insn   s,0xb27c0000,\savearea          # store clock fast
+#else
+       .insn   s,0xb2050000,\savearea          # store clock
+#endif
+       .endm
+
+       .section .kprobes.text, "ax"
+
+/*
+ * Scheduler resume function, called by switch_to
+ *  gpr2 = (task_struct *) prev
+ *  gpr3 = (task_struct *) next
+ * Returns:
+ *  gpr2 = prev
+ */
+ENTRY(__switch_to)
+       stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
+       stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
+       lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
+       lg      %r5,__THREAD_info(%r3)          # get thread_info of next
+       lgr     %r15,%r5
+       aghi    %r15,STACK_INIT                 # end of kernel stack of next
+       stg     %r3,__LC_CURRENT                # store task struct of next
+       stg     %r5,__LC_THREAD_INFO            # store thread info of next
+       stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
+       lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
+       mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+       lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
+       lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
+       br      %r14
+
+.L__critical_start:
+/*
+ * SVC interrupt handler routine. System calls are synchronous events and
+ * are executed with interrupts enabled.
+ */
+
+ENTRY(system_call)
+       stpt    __LC_SYNC_ENTER_TIMER
+.Lsysc_stmg:
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       lghi    %r14,_PIF_SYSCALL
+.Lsysc_per:
+       lg      %r15,__LC_KERNEL_STACK
+       la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
+.Lsysc_vtime:
+       UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+       LAST_BREAK %r13
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+       mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
+       stg     %r14,__PT_FLAGS(%r11)
+.Lsysc_do_svc:
+       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
+       llgh    %r8,__PT_INT_CODE+2(%r11)
+       slag    %r8,%r8,2                       # shift and test for svc 0
+       jnz     .Lsysc_nr_ok
+       # svc 0: system call number in %r1
+       llgfr   %r1,%r1                         # clear high word in r1
+       cghi    %r1,NR_syscalls
+       jnl     .Lsysc_nr_ok
+       sth     %r1,__PT_INT_CODE+2(%r11)
+       slag    %r8,%r1,2
+.Lsysc_nr_ok:
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       stg     %r2,__PT_ORIG_GPR2(%r11)
+       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
+       lgf     %r9,0(%r8,%r10)                 # get system call add.
+       tm      __TI_flags+7(%r12),_TIF_TRACE
+       jnz     .Lsysc_tracesys
+       basr    %r14,%r9                        # call sys_xxxx
+       stg     %r2,__PT_R2(%r11)               # store return value
+
+.Lsysc_return:
+       LOCKDEP_SYS_EXIT
+.Lsysc_tif:
+       tm      __PT_PSW+1(%r11),0x01           # returning to user ?
+       jno     .Lsysc_restore
+       tm      __PT_FLAGS+7(%r11),_PIF_WORK
+       jnz     .Lsysc_work
+       tm      __TI_flags+7(%r12),_TIF_WORK
+       jnz     .Lsysc_work                     # check for work
+       tm      __LC_CPU_FLAGS+7,_CIF_WORK
+       jnz     .Lsysc_work
+.Lsysc_restore:
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+       lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_PSW
+.Lsysc_done:
+
+#
+# One of the work bits is on. Find out which one.
+#
+.Lsysc_work:
+       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+       jo      .Lsysc_mcck_pending
+       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
+       jo      .Lsysc_reschedule
+#ifdef CONFIG_UPROBES
+       tm      __TI_flags+7(%r12),_TIF_UPROBE
+       jo      .Lsysc_uprobe_notify
+#endif
+       tm      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+       jo      .Lsysc_singlestep
+       tm      __TI_flags+7(%r12),_TIF_SIGPENDING
+       jo      .Lsysc_sigpending
+       tm      __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+       jo      .Lsysc_notify_resume
+       tm      __LC_CPU_FLAGS+7,_CIF_ASCE
+       jo      .Lsysc_uaccess
+       j       .Lsysc_return           # beware of critical section cleanup
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#
+.Lsysc_reschedule:
+       larl    %r14,.Lsysc_return
+       jg      schedule
+
+#
+# _CIF_MCCK_PENDING is set, call handler
+#
+.Lsysc_mcck_pending:
+       larl    %r14,.Lsysc_return
+       jg      s390_handle_mcck        # TIF bit will be cleared by handler
+
+#
+# _CIF_ASCE is set, load user space asce
+#
+.Lsysc_uaccess:
+       ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+       j       .Lsysc_return
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+.Lsysc_sigpending:
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_signal
+       tm      __PT_FLAGS+7(%r11),_PIF_SYSCALL
+       jno     .Lsysc_return
+       lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
+       lghi    %r8,0                   # svc 0 returns -ENOSYS
+       llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
+       cghi    %r1,NR_syscalls
+       jnl     .Lsysc_nr_ok            # invalid svc number -> do svc 0
+       slag    %r8,%r1,2
+       j       .Lsysc_nr_ok            # restart svc
+
+#
+# _TIF_NOTIFY_RESUME is set, call do_notify_resume
+#
+.Lsysc_notify_resume:
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,.Lsysc_return
+       jg      do_notify_resume
+
+#
+# _TIF_UPROBE is set, call uprobe_notify_resume
+#
+#ifdef CONFIG_UPROBES
+.Lsysc_uprobe_notify:
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,.Lsysc_return
+       jg      uprobe_notify_resume
+#endif
+
+#
+# _PIF_PER_TRAP is set, call do_per_trap
+#
+.Lsysc_singlestep:
+       ni      __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,.Lsysc_return
+       jg      do_per_trap
+
+#
+# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
+# and after the system call
+#
+.Lsysc_tracesys:
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       la      %r3,0
+       llgh    %r0,__PT_INT_CODE+2(%r11)
+       stg     %r0,__PT_R2(%r11)
+       brasl   %r14,do_syscall_trace_enter
+       lghi    %r0,NR_syscalls
+       clgr    %r0,%r2
+       jnh     .Lsysc_tracenogo
+       sllg    %r8,%r2,2
+       lgf     %r9,0(%r8,%r10)
+.Lsysc_tracego:
+       lmg     %r3,%r7,__PT_R3(%r11)
+       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
+       lg      %r2,__PT_ORIG_GPR2(%r11)
+       basr    %r14,%r9                # call sys_xxx
+       stg     %r2,__PT_R2(%r11)       # store return value
+.Lsysc_tracenogo:
+       tm      __TI_flags+7(%r12),_TIF_TRACE
+       jz      .Lsysc_return
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       larl    %r14,.Lsysc_return
+       jg      do_syscall_trace_exit
+
+#
+# a new process exits the kernel with ret_from_fork
+#
+ENTRY(ret_from_fork)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       lg      %r12,__LC_THREAD_INFO
+       brasl   %r14,schedule_tail
+       TRACE_IRQS_ON
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
+       jne     .Lsysc_tracenogo
+       # it's a kernel thread
+       lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
+ENTRY(kernel_thread_starter)
+       la      %r2,0(%r10)
+       basr    %r14,%r9
+       j       .Lsysc_tracenogo
+
+/*
+ * Program check handler routine
+ */
+
+ENTRY(pgm_check_handler)
+       stpt    __LC_SYNC_ENTER_TIMER
+       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_PGM_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14,1
+       tmhh    %r8,0x0001              # test problem state bit
+       jnz     1f                      # -> fault in user space
+       tmhh    %r8,0x4000              # PER bit set in old PSW ?
+       jnz     0f                      # -> enabled, can't be a double fault
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jnz     .Lpgm_svcper            # -> single stepped svc
+0:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       j       2f
+1:     UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+       LAST_BREAK %r14
+       lg      %r15,__LC_KERNEL_STACK
+       lg      %r14,__TI_task(%r12)
+       lghi    %r13,__LC_PGM_TDB
+       tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
+       jz      2f
+       mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
+2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_INT_CODE(4,%r11),__LC_PGM_ILC
+       mvc     __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       stg     %r10,__PT_ARGS(%r11)
+       tm      __LC_PGM_ILC+3,0x80     # check for per exception
+       jz      0f
+       tmhh    %r8,0x0001              # kernel per event ?
+       jz      .Lpgm_kprobe
+       oi      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+       mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+       mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
+       mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
+0:     REENABLE_IRQS
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       larl    %r1,pgm_check_table
+       llgh    %r10,__PT_INT_CODE+2(%r11)
+       nill    %r10,0x007f
+       sll     %r10,2
+       je      .Lsysc_return
+       lgf     %r1,0(%r10,%r1)         # load address of handler routine
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       basr    %r14,%r1                # branch to interrupt-handler
+       j       .Lsysc_return
+
+#
+# PER event in supervisor state, must be kprobes
+#
+.Lpgm_kprobe:
+       REENABLE_IRQS
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_per_trap
+       j       .Lsysc_return
+
+#
+# single stepped system call
+#
+.Lpgm_svcper:
+       mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+       larl    %r14,.Lsysc_per
+       stg     %r14,__LC_RETURN_PSW+8
+       lghi    %r14,_PIF_SYSCALL | _PIF_PER_TRAP
+       lpswe   __LC_RETURN_PSW         # branch to .Lsysc_per and enable irqs
+
+/*
+ * IO interrupt handler routine
+ */
+ENTRY(io_int_handler)
+       STCK    __LC_INT_CLOCK
+       stpt    __LC_ASYNC_ENTER_TIMER
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_IO_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14,2
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       tmhh    %r8,0x0001              # interrupting from user?
+       jz      .Lio_skip
+       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+       LAST_BREAK %r14
+.Lio_skip:
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       TRACE_IRQS_OFF
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+.Lio_loop:
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       lghi    %r3,IO_INTERRUPT
+       tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
+       jz      .Lio_call
+       lghi    %r3,THIN_INTERRUPT
+.Lio_call:
+       brasl   %r14,do_IRQ
+       tm      __LC_MACHINE_FLAGS+6,0x10       # MACHINE_FLAG_LPAR
+       jz      .Lio_return
+       tpi     0
+       jz      .Lio_return
+       mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+       j       .Lio_loop
+.Lio_return:
+       LOCKDEP_SYS_EXIT
+       TRACE_IRQS_ON
+.Lio_tif:
+       tm      __TI_flags+7(%r12),_TIF_WORK
+       jnz     .Lio_work               # there is work to do (signals etc.)
+       tm      __LC_CPU_FLAGS+7,_CIF_WORK
+       jnz     .Lio_work
+.Lio_restore:
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+       lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_PSW
+.Lio_done:
+
+#
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+#    modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
+#
+.Lio_work:
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jo      .Lio_work_user          # yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
+       # check for preemptive scheduling
+       icm     %r0,15,__TI_precount(%r12)
+       jnz     .Lio_restore            # preemption is disabled
+       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
+       jno     .Lio_restore
+       # switch to kernel stack
+       lg      %r1,__PT_R15(%r11)
+       aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
+       lgr     %r15,%r1
+       # TRACE_IRQS_ON already done at .Lio_return, call
+       # TRACE_IRQS_OFF to keep things symmetrical
+       TRACE_IRQS_OFF
+       brasl   %r14,preempt_schedule_irq
+       j       .Lio_return
+#else
+       j       .Lio_restore
+#endif
+
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
+.Lio_work_user:
+       lg      %r1,__LC_KERNEL_STACK
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
+       lgr     %r15,%r1
+
+#
+# One of the work bits is on. Find out which one.
+#
+.Lio_work_tif:
+       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+       jo      .Lio_mcck_pending
+       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
+       jo      .Lio_reschedule
+       tm      __TI_flags+7(%r12),_TIF_SIGPENDING
+       jo      .Lio_sigpending
+       tm      __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+       jo      .Lio_notify_resume
+       tm      __LC_CPU_FLAGS+7,_CIF_ASCE
+       jo      .Lio_uaccess
+       j       .Lio_return             # beware of critical section cleanup
+
+#
+# _CIF_MCCK_PENDING is set, call handler
+#
+.Lio_mcck_pending:
+       # TRACE_IRQS_ON already done at .Lio_return
+       brasl   %r14,s390_handle_mcck   # TIF bit will be cleared by handler
+       TRACE_IRQS_OFF
+       j       .Lio_return
+
+#
+# _CIF_ASCE is set, load user space asce
+#
+.Lio_uaccess:
+       ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+       j       .Lio_return
+
+#
+# _TIF_NEED_RESCHED is set, call schedule
+#
+.Lio_reschedule:
+       # TRACE_IRQS_ON already done at .Lio_return
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       brasl   %r14,schedule           # call scheduler
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
+       TRACE_IRQS_OFF
+       j       .Lio_return
+
+#
+# _TIF_SIGPENDING is set, call do_signal
+#
+.Lio_sigpending:
+       # TRACE_IRQS_ON already done at .Lio_return
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_signal
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
+       TRACE_IRQS_OFF
+       j       .Lio_return
+
+#
+# _TIF_NOTIFY_RESUME is set, call do_notify_resume
+#
+.Lio_notify_resume:
+       # TRACE_IRQS_ON already done at .Lio_return
+       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,do_notify_resume
+       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
+       TRACE_IRQS_OFF
+       j       .Lio_return
+
+/*
+ * External interrupt handler routine
+ */
+ENTRY(ext_int_handler)
+       STCK    __LC_INT_CLOCK
+       stpt    __LC_ASYNC_ENTER_TIMER
+       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_EXT_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14,3
+       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+       tmhh    %r8,0x0001              # interrupting from user ?
+       jz      .Lext_skip
+       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+       LAST_BREAK %r14
+.Lext_skip:
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       lghi    %r1,__LC_EXT_PARAMS2
+       mvc     __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
+       mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
+       mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
+       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       TRACE_IRQS_OFF
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       lghi    %r3,EXT_INTERRUPT
+       brasl   %r14,do_IRQ
+       j       .Lio_return
+
+/*
+ * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
+ */
+ENTRY(psw_idle)
+       stg     %r3,__SF_EMPTY(%r15)
+       larl    %r1,.Lpsw_idle_lpsw+4
+       stg     %r1,__SF_EMPTY+8(%r15)
+       STCK    __CLOCK_IDLE_ENTER(%r2)
+       stpt    __TIMER_IDLE_ENTER(%r2)
+.Lpsw_idle_lpsw:
+       lpswe   __SF_EMPTY(%r15)
+       br      %r14
+.Lpsw_idle_end:
+
+.L__critical_end:
+
+/*
+ * Machine check handler routines
+ */
+ENTRY(mcck_int_handler)
+       STCK    __LC_MCCK_CLOCK
+       la      %r1,4095                # revalidate r1
+       spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # revalidate cpu timer
+       lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+       lg      %r10,__LC_LAST_BREAK
+       lg      %r12,__LC_THREAD_INFO
+       larl    %r13,system_call
+       lmg     %r8,%r9,__LC_MCK_OLD_PSW
+       HANDLE_SIE_INTERCEPT %r14,4
+       tm      __LC_MCCK_CODE,0x80     # system damage?
+       jo      .Lmcck_panic            # yes -> rest of mcck code invalid
+       lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
+       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
+       tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
+       jo      3f
+       la      %r14,__LC_SYNC_ENTER_TIMER
+       clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
+       jl      0f
+       la      %r14,__LC_ASYNC_ENTER_TIMER
+0:     clc     0(8,%r14),__LC_EXIT_TIMER
+       jl      1f
+       la      %r14,__LC_EXIT_TIMER
+1:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
+       jl      2f
+       la      %r14,__LC_LAST_UPDATE_TIMER
+2:     spt     0(%r14)
+       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
+3:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
+       jno     .Lmcck_panic            # no -> skip cleanup critical
+       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
+       tm      %r8,0x0001              # interrupting from user ?
+       jz      .Lmcck_skip
+       UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+       LAST_BREAK %r14
+.Lmcck_skip:
+       lghi    %r14,__LC_GPREGS_SAVE_AREA+64
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),0(%r14)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       brasl   %r14,s390_do_machine_check
+       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
+       jno     .Lmcck_return
+       lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
+       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+       la      %r11,STACK_FRAME_OVERHEAD(%r1)
+       lgr     %r15,%r1
+       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
+       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+       jno     .Lmcck_return
+       TRACE_IRQS_OFF
+       brasl   %r14,s390_handle_mcck
+       TRACE_IRQS_ON
+.Lmcck_return:
+       lg      %r14,__LC_VDSO_PER_CPU
+       lmg     %r0,%r10,__PT_R0(%r11)
+       mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+       tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+       jno     0f
+       stpt    __LC_EXIT_TIMER
+       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0:     lmg     %r11,%r15,__PT_R11(%r11)
+       lpswe   __LC_RETURN_MCCK_PSW
+
+.Lmcck_panic:
+       lg      %r14,__LC_PANIC_STACK
+       slgr    %r14,%r15
+       srag    %r14,%r14,PAGE_SHIFT
+       jz      0f
+       lg      %r15,__LC_PANIC_STACK
+0:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+       j       .Lmcck_skip
+
+#
+# PSW restart interrupt handler
+#
+ENTRY(restart_int_handler)
+       stg     %r15,__LC_SAVE_AREA_RESTART
+       lg      %r15,__LC_RESTART_STACK
+       aghi    %r15,-__PT_SIZE                 # create pt_regs on stack
+       xc      0(__PT_SIZE,%r15),0(%r15)
+       stmg    %r0,%r14,__PT_R0(%r15)
+       mvc     __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+       mvc     __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+       aghi    %r15,-STACK_FRAME_OVERHEAD      # create stack frame on stack
+       xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
+       lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
+       lg      %r2,__LC_RESTART_DATA
+       lg      %r3,__LC_RESTART_SOURCE
+       ltgr    %r3,%r3                         # test source cpu address
+       jm      1f                              # negative -> skip source stop
+0:     sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
+       brc     10,0b                           # wait for status stored
+1:     basr    %r14,%r1                        # call function
+       stap    __SF_EMPTY(%r15)                # store cpu address
+       llgh    %r3,__SF_EMPTY(%r15)
+2:     sigp    %r4,%r3,SIGP_STOP               # sigp stop to current cpu
+       brc     2,2b
+3:     j       3b
+
+       .section .kprobes.text, "ax"
+
+#ifdef CONFIG_CHECK_STACK
+/*
+ * The synchronous or the asynchronous stack overflowed. We are dead.
+ * No need to properly save the registers, we are going to panic anyway.
+ * Setup a pt_regs so that show_trace can provide a good call trace.
+ */
+stack_overflow:
+       lg      %r15,__LC_PANIC_STACK   # change to panic stack
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
+       stmg    %r0,%r7,__PT_R0(%r11)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_R8(64,%r11),0(%r14)
+       stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       jg      kernel_stack_overflow
+#endif
+
+       .align  8
+.Lcleanup_table:
+       .quad   system_call
+       .quad   .Lsysc_do_svc
+       .quad   .Lsysc_tif
+       .quad   .Lsysc_restore
+       .quad   .Lsysc_done
+       .quad   .Lio_tif
+       .quad   .Lio_restore
+       .quad   .Lio_done
+       .quad   psw_idle
+       .quad   .Lpsw_idle_end
+
+cleanup_critical:
+       clg     %r9,BASED(.Lcleanup_table)      # system_call
+       jl      0f
+       clg     %r9,BASED(.Lcleanup_table+8)    # .Lsysc_do_svc
+       jl      .Lcleanup_system_call
+       clg     %r9,BASED(.Lcleanup_table+16)   # .Lsysc_tif
+       jl      0f
+       clg     %r9,BASED(.Lcleanup_table+24)   # .Lsysc_restore
+       jl      .Lcleanup_sysc_tif
+       clg     %r9,BASED(.Lcleanup_table+32)   # .Lsysc_done
+       jl      .Lcleanup_sysc_restore
+       clg     %r9,BASED(.Lcleanup_table+40)   # .Lio_tif
+       jl      0f
+       clg     %r9,BASED(.Lcleanup_table+48)   # .Lio_restore
+       jl      .Lcleanup_io_tif
+       clg     %r9,BASED(.Lcleanup_table+56)   # .Lio_done
+       jl      .Lcleanup_io_restore
+       clg     %r9,BASED(.Lcleanup_table+64)   # psw_idle
+       jl      0f
+       clg     %r9,BASED(.Lcleanup_table+72)   # .Lpsw_idle_end
+       jl      .Lcleanup_idle
+0:     br      %r14
+
+
+.Lcleanup_system_call:
+       # check if stpt has been executed
+       clg     %r9,BASED(.Lcleanup_system_call_insn)
+       jh      0f
+       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
+       cghi    %r11,__LC_SAVE_AREA_ASYNC
+       je      0f
+       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:     # check if stmg has been executed
+       clg     %r9,BASED(.Lcleanup_system_call_insn+8)
+       jh      0f
+       mvc     __LC_SAVE_AREA_SYNC(64),0(%r11)
+0:     # check if base register setup + TIF bit load has been done
+       clg     %r9,BASED(.Lcleanup_system_call_insn+16)
+       jhe     0f
+       # set up saved registers r10 and r12
+       stg     %r10,16(%r11)           # r10 last break
+       stg     %r12,32(%r11)           # r12 thread-info pointer
+0:     # check if the user time update has been done
+       clg     %r9,BASED(.Lcleanup_system_call_insn+24)
+       jh      0f
+       lg      %r15,__LC_EXIT_TIMER
+       slg     %r15,__LC_SYNC_ENTER_TIMER
+       alg     %r15,__LC_USER_TIMER
+       stg     %r15,__LC_USER_TIMER
+0:     # check if the system time update has been done
+       clg     %r9,BASED(.Lcleanup_system_call_insn+32)
+       jh      0f
+       lg      %r15,__LC_LAST_UPDATE_TIMER
+       slg     %r15,__LC_EXIT_TIMER
+       alg     %r15,__LC_SYSTEM_TIMER
+       stg     %r15,__LC_SYSTEM_TIMER
+0:     # update accounting time stamp
+       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+       # do LAST_BREAK
+       lg      %r9,16(%r11)
+       srag    %r9,%r9,23
+       jz      0f
+       mvc     __TI_last_break(8,%r12),16(%r11)
+0:     # set up saved register r11
+       lg      %r15,__LC_KERNEL_STACK
+       la      %r9,STACK_FRAME_OVERHEAD(%r15)
+       stg     %r9,24(%r11)            # r11 pt_regs pointer
+       # fill pt_regs
+       mvc     __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+       stmg    %r0,%r7,__PT_R0(%r9)
+       mvc     __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
+       mvc     __PT_INT_CODE(4,%r9),__LC_SVC_ILC
+       xc      __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
+       mvi     __PT_FLAGS+7(%r9),_PIF_SYSCALL
+       # setup saved register r15
+       stg     %r15,56(%r11)           # r15 stack pointer
+       # set new psw address and exit
+       larl    %r9,.Lsysc_do_svc
+       br      %r14
+.Lcleanup_system_call_insn:
+       .quad   system_call
+       .quad   .Lsysc_stmg
+       .quad   .Lsysc_per
+       .quad   .Lsysc_vtime+18
+       .quad   .Lsysc_vtime+42
+
+.Lcleanup_sysc_tif:
+       larl    %r9,.Lsysc_tif
+       br      %r14
+
+.Lcleanup_sysc_restore:
+       clg     %r9,BASED(.Lcleanup_sysc_restore_insn)
+       je      0f
+       lg      %r9,24(%r11)            # get saved pointer to pt_regs
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+0:     lmg     %r8,%r9,__LC_RETURN_PSW
+       br      %r14
+.Lcleanup_sysc_restore_insn:
+       .quad   .Lsysc_done - 4
+
+.Lcleanup_io_tif:
+       larl    %r9,.Lio_tif
+       br      %r14
+
+.Lcleanup_io_restore:
+       clg     %r9,BASED(.Lcleanup_io_restore_insn)
+       je      0f
+       lg      %r9,24(%r11)            # get saved r11 pointer to pt_regs
+       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
+       mvc     0(64,%r11),__PT_R8(%r9)
+       lmg     %r0,%r7,__PT_R0(%r9)
+0:     lmg     %r8,%r9,__LC_RETURN_PSW
+       br      %r14
+.Lcleanup_io_restore_insn:
+       .quad   .Lio_done - 4
+
+.Lcleanup_idle:
+       # copy interrupt clock & cpu timer
+       mvc     __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+       mvc     __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
+       cghi    %r11,__LC_SAVE_AREA_ASYNC
+       je      0f
+       mvc     __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+       mvc     __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
+0:     # check if stck & stpt have been executed
+       clg     %r9,BASED(.Lcleanup_idle_insn)
+       jhe     1f
+       mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+       mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1:     # account system time going idle
+       lg      %r9,__LC_STEAL_TIMER
+       alg     %r9,__CLOCK_IDLE_ENTER(%r2)
+       slg     %r9,__LC_LAST_UPDATE_CLOCK
+       stg     %r9,__LC_STEAL_TIMER
+       mvc     __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
+       lg      %r9,__LC_SYSTEM_TIMER
+       alg     %r9,__LC_LAST_UPDATE_TIMER
+       slg     %r9,__TIMER_IDLE_ENTER(%r2)
+       stg     %r9,__LC_SYSTEM_TIMER
+       mvc     __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
+       # prepare return psw
+       nihh    %r8,0xfcfd              # clear irq & wait state bits
+       lg      %r9,48(%r11)            # return from psw_idle
+       br      %r14
+.Lcleanup_idle_insn:
+       .quad   .Lpsw_idle_lpsw
+
+/*
+ * Integer constants
+ */
+       .align  8
+.Lcritical_start:
+       .quad   .L__critical_start
+.Lcritical_length:
+       .quad   .L__critical_end - .L__critical_start
+
+
+#if IS_ENABLED(CONFIG_KVM)
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ENTRY(sie64a)
+       stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
+       stg     %r2,__SF_EMPTY(%r15)            # save control block pointer
+       stg     %r3,__SF_EMPTY+8(%r15)          # save guest register save area
+       xc      __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+       lmg     %r0,%r13,0(%r3)                 # load guest gprs 0-13
+       lg      %r14,__LC_GMAP                  # get gmap pointer
+       ltgr    %r14,%r14
+       jz      .Lsie_gmap
+       lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
+.Lsie_gmap:
+       lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
+       oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
+       tm      __SIE_PROG20+3(%r14),1          # last exit...
+       jnz     .Lsie_done
+       LPP     __SF_EMPTY(%r15)                # set guest id
+       sie     0(%r14)
+.Lsie_done:
+       LPP     __SF_EMPTY+16(%r15)             # set host id
+       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
+       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+.Lrewind_pad:
+       nop     0
+       .globl sie_exit
+sie_exit:
+       lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
+       stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
+       lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
+       lg      %r2,__SF_EMPTY+24(%r15)         # return exit reason code
+       br      %r14
+.Lsie_fault:
+       lghi    %r14,-EFAULT
+       stg     %r14,__SF_EMPTY+24(%r15)        # set exit reason code
+       j       sie_exit
+
+       .align  8
+.Lsie_critical:
+       .quad   .Lsie_gmap
+.Lsie_critical_length:
+       .quad   .Lsie_done - .Lsie_gmap
+
+       EX_TABLE(.Lrewind_pad,.Lsie_fault)
+       EX_TABLE(sie_exit,.Lsie_fault)
+#endif
+
+               .section .rodata, "a"
+#define SYSCALL(esa,esame,emu) .long esame
+       .globl  sys_call_table
+sys_call_table:
+#include "syscalls.S"
+#undef SYSCALL
+
+#ifdef CONFIG_COMPAT
+
+#define SYSCALL(esa,esame,emu) .long emu
+       .globl  sys_call_table_emu
+sys_call_table_emu:
+#include "syscalls.S"
+#undef SYSCALL
+#endif
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
deleted file mode 100644 (file)
index c329446..0000000
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- *    S390 low-level entry points.
- *
- *    Copyright IBM Corp. 1999, 2012
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Hartmut Penner (hp@de.ibm.com),
- *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- *              Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/page.h>
-#include <asm/sigp.h>
-#include <asm/irq.h>
-
-__PT_R0      = __PT_GPRS
-__PT_R1      = __PT_GPRS + 8
-__PT_R2      = __PT_GPRS + 16
-__PT_R3      = __PT_GPRS + 24
-__PT_R4      = __PT_GPRS + 32
-__PT_R5      = __PT_GPRS + 40
-__PT_R6      = __PT_GPRS + 48
-__PT_R7      = __PT_GPRS + 56
-__PT_R8      = __PT_GPRS + 64
-__PT_R9      = __PT_GPRS + 72
-__PT_R10     = __PT_GPRS + 80
-__PT_R11     = __PT_GPRS + 88
-__PT_R12     = __PT_GPRS + 96
-__PT_R13     = __PT_GPRS + 104
-__PT_R14     = __PT_GPRS + 112
-__PT_R15     = __PT_GPRS + 120
-
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
-STACK_SIZE  = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
-_TIF_WORK      = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
-                  _TIF_UPROBE)
-_TIF_TRACE     = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
-                  _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK      = (_CIF_MCCK_PENDING | _CIF_ASCE)
-_PIF_WORK      = (_PIF_PER_TRAP)
-
-#define BASED(name) name-system_call(%r13)
-
-       .macro  TRACE_IRQS_ON
-#ifdef CONFIG_TRACE_IRQFLAGS
-       basr    %r2,%r0
-       brasl   %r14,trace_hardirqs_on_caller
-#endif
-       .endm
-
-       .macro  TRACE_IRQS_OFF
-#ifdef CONFIG_TRACE_IRQFLAGS
-       basr    %r2,%r0
-       brasl   %r14,trace_hardirqs_off_caller
-#endif
-       .endm
-
-       .macro  LOCKDEP_SYS_EXIT
-#ifdef CONFIG_LOCKDEP
-       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
-       jz      .+10
-       brasl   %r14,lockdep_sys_exit
-#endif
-       .endm
-
-       .macro LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
-       tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_LPP
-       jz      .+8
-       .insn   s,0xb2800000,\newpp
-#endif
-       .endm
-
-       .macro  HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
-       tmhh    %r8,0x0001              # interrupting from user ?
-       jnz     .+62
-       lgr     \scratch,%r9
-       slg     \scratch,BASED(.Lsie_critical)
-       clg     \scratch,BASED(.Lsie_critical_length)
-       .if     \reason==1
-       # Some program interrupts are suppressing (e.g. protection).
-       # We must also check the instruction after SIE in that case.
-       # do_protection_exception will rewind to .Lrewind_pad
-       jh      .+42
-       .else
-       jhe     .+42
-       .endif
-       lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
-       LPP     __SF_EMPTY+16(%r15)             # set host id
-       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
-       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
-       larl    %r9,sie_exit                    # skip forward to sie_exit
-       mvi     __SF_EMPTY+31(%r15),\reason     # set exit reason
-#endif
-       .endm
-
-       .macro  CHECK_STACK stacksize,savearea
-#ifdef CONFIG_CHECK_STACK
-       tml     %r15,\stacksize - CONFIG_STACK_GUARD
-       lghi    %r14,\savearea
-       jz      stack_overflow
-#endif
-       .endm
-
-       .macro  SWITCH_ASYNC savearea,stack,shift
-       tmhh    %r8,0x0001              # interrupting from user ?
-       jnz     1f
-       lgr     %r14,%r9
-       slg     %r14,BASED(.Lcritical_start)
-       clg     %r14,BASED(.Lcritical_length)
-       jhe     0f
-       lghi    %r11,\savearea          # inside critical section, do cleanup
-       brasl   %r14,cleanup_critical
-       tmhh    %r8,0x0001              # retest problem state after cleanup
-       jnz     1f
-0:     lg      %r14,\stack             # are we already on the target stack?
-       slgr    %r14,%r15
-       srag    %r14,%r14,\shift
-       jnz     1f
-       CHECK_STACK 1<<\shift,\savearea
-       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       j       2f
-1:     lg      %r15,\stack             # load target stack
-2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
-       .endm
-
-       .macro UPDATE_VTIME scratch,enter_timer
-       lg      \scratch,__LC_EXIT_TIMER
-       slg     \scratch,\enter_timer
-       alg     \scratch,__LC_USER_TIMER
-       stg     \scratch,__LC_USER_TIMER
-       lg      \scratch,__LC_LAST_UPDATE_TIMER
-       slg     \scratch,__LC_EXIT_TIMER
-       alg     \scratch,__LC_SYSTEM_TIMER
-       stg     \scratch,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),\enter_timer
-       .endm
-
-       .macro  LAST_BREAK scratch
-       srag    \scratch,%r10,23
-       jz      .+10
-       stg     %r10,__TI_last_break(%r12)
-       .endm
-
-       .macro REENABLE_IRQS
-       stg     %r8,__LC_RETURN_PSW
-       ni      __LC_RETURN_PSW,0xbf
-       ssm     __LC_RETURN_PSW
-       .endm
-
-       .macro STCK savearea
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-       .insn   s,0xb27c0000,\savearea          # store clock fast
-#else
-       .insn   s,0xb2050000,\savearea          # store clock
-#endif
-       .endm
-
-       .section .kprobes.text, "ax"
-
-/*
- * Scheduler resume function, called by switch_to
- *  gpr2 = (task_struct *) prev
- *  gpr3 = (task_struct *) next
- * Returns:
- *  gpr2 = prev
- */
-ENTRY(__switch_to)
-       stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
-       lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
-       lg      %r5,__THREAD_info(%r3)          # get thread_info of next
-       lgr     %r15,%r5
-       aghi    %r15,STACK_INIT                 # end of kernel stack of next
-       stg     %r3,__LC_CURRENT                # store task struct of next
-       stg     %r5,__LC_THREAD_INFO            # store thread info of next
-       stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
-       lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
-       mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-       lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
-       lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
-       br      %r14
-
-.L__critical_start:
-/*
- * SVC interrupt handler routine. System calls are synchronous events and
- * are executed with interrupts enabled.
- */
-
-ENTRY(system_call)
-       stpt    __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
-       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
-       lg      %r10,__LC_LAST_BREAK
-       lg      %r12,__LC_THREAD_INFO
-       lghi    %r14,_PIF_SYSCALL
-.Lsysc_per:
-       lg      %r15,__LC_KERNEL_STACK
-       la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
-       UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
-       LAST_BREAK %r13
-       stmg    %r0,%r7,__PT_R0(%r11)
-       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
-       mvc     __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
-       mvc     __PT_INT_CODE(4,%r11),__LC_SVC_ILC
-       stg     %r14,__PT_FLAGS(%r11)
-.Lsysc_do_svc:
-       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
-       llgh    %r8,__PT_INT_CODE+2(%r11)
-       slag    %r8,%r8,2                       # shift and test for svc 0
-       jnz     .Lsysc_nr_ok
-       # svc 0: system call number in %r1
-       llgfr   %r1,%r1                         # clear high word in r1
-       cghi    %r1,NR_syscalls
-       jnl     .Lsysc_nr_ok
-       sth     %r1,__PT_INT_CODE+2(%r11)
-       slag    %r8,%r1,2
-.Lsysc_nr_ok:
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       stg     %r2,__PT_ORIG_GPR2(%r11)
-       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
-       lgf     %r9,0(%r8,%r10)                 # get system call add.
-       tm      __TI_flags+7(%r12),_TIF_TRACE
-       jnz     .Lsysc_tracesys
-       basr    %r14,%r9                        # call sys_xxxx
-       stg     %r2,__PT_R2(%r11)               # store return value
-
-.Lsysc_return:
-       LOCKDEP_SYS_EXIT
-.Lsysc_tif:
-       tm      __PT_PSW+1(%r11),0x01           # returning to user ?
-       jno     .Lsysc_restore
-       tm      __PT_FLAGS+7(%r11),_PIF_WORK
-       jnz     .Lsysc_work
-       tm      __TI_flags+7(%r12),_TIF_WORK
-       jnz     .Lsysc_work                     # check for work
-       tm      __LC_CPU_FLAGS+7,_CIF_WORK
-       jnz     .Lsysc_work
-.Lsysc_restore:
-       lg      %r14,__LC_VDSO_PER_CPU
-       lmg     %r0,%r10,__PT_R0(%r11)
-       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
-       stpt    __LC_EXIT_TIMER
-       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-       lmg     %r11,%r15,__PT_R11(%r11)
-       lpswe   __LC_RETURN_PSW
-.Lsysc_done:
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lsysc_work:
-       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
-       jo      .Lsysc_mcck_pending
-       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
-       jo      .Lsysc_reschedule
-#ifdef CONFIG_UPROBES
-       tm      __TI_flags+7(%r12),_TIF_UPROBE
-       jo      .Lsysc_uprobe_notify
-#endif
-       tm      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
-       jo      .Lsysc_singlestep
-       tm      __TI_flags+7(%r12),_TIF_SIGPENDING
-       jo      .Lsysc_sigpending
-       tm      __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
-       jo      .Lsysc_notify_resume
-       tm      __LC_CPU_FLAGS+7,_CIF_ASCE
-       jo      .Lsysc_uaccess
-       j       .Lsysc_return           # beware of critical section cleanup
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lsysc_reschedule:
-       larl    %r14,.Lsysc_return
-       jg      schedule
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
-       larl    %r14,.Lsysc_return
-       jg      s390_handle_mcck        # TIF bit will be cleared by handler
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lsysc_uaccess:
-       ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE
-       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
-       j       .Lsysc_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lsysc_sigpending:
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,do_signal
-       tm      __PT_FLAGS+7(%r11),_PIF_SYSCALL
-       jno     .Lsysc_return
-       lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
-       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
-       lghi    %r8,0                   # svc 0 returns -ENOSYS
-       llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
-       cghi    %r1,NR_syscalls
-       jnl     .Lsysc_nr_ok            # invalid svc number -> do svc 0
-       slag    %r8,%r1,2
-       j       .Lsysc_nr_ok            # restart svc
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lsysc_notify_resume:
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       larl    %r14,.Lsysc_return
-       jg      do_notify_resume
-
-#
-# _TIF_UPROBE is set, call uprobe_notify_resume
-#
-#ifdef CONFIG_UPROBES
-.Lsysc_uprobe_notify:
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       larl    %r14,.Lsysc_return
-       jg      uprobe_notify_resume
-#endif
-
-#
-# _PIF_PER_TRAP is set, call do_per_trap
-#
-.Lsysc_singlestep:
-       ni      __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       larl    %r14,.Lsysc_return
-       jg      do_per_trap
-
-#
-# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
-# and after the system call
-#
-.Lsysc_tracesys:
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       la      %r3,0
-       llgh    %r0,__PT_INT_CODE+2(%r11)
-       stg     %r0,__PT_R2(%r11)
-       brasl   %r14,do_syscall_trace_enter
-       lghi    %r0,NR_syscalls
-       clgr    %r0,%r2
-       jnh     .Lsysc_tracenogo
-       sllg    %r8,%r2,2
-       lgf     %r9,0(%r8,%r10)
-.Lsysc_tracego:
-       lmg     %r3,%r7,__PT_R3(%r11)
-       stg     %r7,STACK_FRAME_OVERHEAD(%r15)
-       lg      %r2,__PT_ORIG_GPR2(%r11)
-       basr    %r14,%r9                # call sys_xxx
-       stg     %r2,__PT_R2(%r11)       # store return value
-.Lsysc_tracenogo:
-       tm      __TI_flags+7(%r12),_TIF_TRACE
-       jz      .Lsysc_return
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       larl    %r14,.Lsysc_return
-       jg      do_syscall_trace_exit
-
-#
-# a new process exits the kernel with ret_from_fork
-#
-ENTRY(ret_from_fork)
-       la      %r11,STACK_FRAME_OVERHEAD(%r15)
-       lg      %r12,__LC_THREAD_INFO
-       brasl   %r14,schedule_tail
-       TRACE_IRQS_ON
-       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
-       tm      __PT_PSW+1(%r11),0x01   # forking a kernel thread ?
-       jne     .Lsysc_tracenogo
-       # it's a kernel thread
-       lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
-ENTRY(kernel_thread_starter)
-       la      %r2,0(%r10)
-       basr    %r14,%r9
-       j       .Lsysc_tracenogo
-
-/*
- * Program check handler routine
- */
-
-ENTRY(pgm_check_handler)
-       stpt    __LC_SYNC_ENTER_TIMER
-       stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
-       lg      %r10,__LC_LAST_BREAK
-       lg      %r12,__LC_THREAD_INFO
-       larl    %r13,system_call
-       lmg     %r8,%r9,__LC_PGM_OLD_PSW
-       HANDLE_SIE_INTERCEPT %r14,1
-       tmhh    %r8,0x0001              # test problem state bit
-       jnz     1f                      # -> fault in user space
-       tmhh    %r8,0x4000              # PER bit set in old PSW ?
-       jnz     0f                      # -> enabled, can't be a double fault
-       tm      __LC_PGM_ILC+3,0x80     # check for per exception
-       jnz     .Lpgm_svcper            # -> single stepped svc
-0:     CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
-       aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       j       2f
-1:     UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
-       LAST_BREAK %r14
-       lg      %r15,__LC_KERNEL_STACK
-       lg      %r14,__TI_task(%r12)
-       lghi    %r13,__LC_PGM_TDB
-       tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
-       jz      2f
-       mvc     __THREAD_trap_tdb(256,%r14),0(%r13)
-2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
-       stmg    %r0,%r7,__PT_R0(%r11)
-       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
-       stmg    %r8,%r9,__PT_PSW(%r11)
-       mvc     __PT_INT_CODE(4,%r11),__LC_PGM_ILC
-       mvc     __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
-       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-       stg     %r10,__PT_ARGS(%r11)
-       tm      __LC_PGM_ILC+3,0x80     # check for per exception
-       jz      0f
-       tmhh    %r8,0x0001              # kernel per event ?
-       jz      .Lpgm_kprobe
-       oi      __PT_FLAGS+7(%r11),_PIF_PER_TRAP
-       mvc     __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
-       mvc     __THREAD_per_cause(2,%r14),__LC_PER_CODE
-       mvc     __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0:     REENABLE_IRQS
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       larl    %r1,pgm_check_table
-       llgh    %r10,__PT_INT_CODE+2(%r11)
-       nill    %r10,0x007f
-       sll     %r10,2
-       je      .Lsysc_return
-       lgf     %r1,0(%r10,%r1)         # load address of handler routine
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       basr    %r14,%r1                # branch to interrupt-handler
-       j       .Lsysc_return
-
-#
-# PER event in supervisor state, must be kprobes
-#
-.Lpgm_kprobe:
-       REENABLE_IRQS
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,do_per_trap
-       j       .Lsysc_return
-
-#
-# single stepped system call
-#
-.Lpgm_svcper:
-       mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
-       larl    %r14,.Lsysc_per
-       stg     %r14,__LC_RETURN_PSW+8
-       lghi    %r14,_PIF_SYSCALL | _PIF_PER_TRAP
-       lpswe   __LC_RETURN_PSW         # branch to .Lsysc_per and enable irqs
-
-/*
- * IO interrupt handler routine
- */
-ENTRY(io_int_handler)
-       STCK    __LC_INT_CLOCK
-       stpt    __LC_ASYNC_ENTER_TIMER
-       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
-       lg      %r10,__LC_LAST_BREAK
-       lg      %r12,__LC_THREAD_INFO
-       larl    %r13,system_call
-       lmg     %r8,%r9,__LC_IO_OLD_PSW
-       HANDLE_SIE_INTERCEPT %r14,2
-       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-       tmhh    %r8,0x0001              # interrupting from user?
-       jz      .Lio_skip
-       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-       LAST_BREAK %r14
-.Lio_skip:
-       stmg    %r0,%r7,__PT_R0(%r11)
-       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
-       stmg    %r8,%r9,__PT_PSW(%r11)
-       mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
-       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-       TRACE_IRQS_OFF
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-.Lio_loop:
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       lghi    %r3,IO_INTERRUPT
-       tm      __PT_INT_CODE+8(%r11),0x80      # adapter interrupt ?
-       jz      .Lio_call
-       lghi    %r3,THIN_INTERRUPT
-.Lio_call:
-       brasl   %r14,do_IRQ
-       tm      __LC_MACHINE_FLAGS+6,0x10       # MACHINE_FLAG_LPAR
-       jz      .Lio_return
-       tpi     0
-       jz      .Lio_return
-       mvc     __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
-       j       .Lio_loop
-.Lio_return:
-       LOCKDEP_SYS_EXIT
-       TRACE_IRQS_ON
-.Lio_tif:
-       tm      __TI_flags+7(%r12),_TIF_WORK
-       jnz     .Lio_work               # there is work to do (signals etc.)
-       tm      __LC_CPU_FLAGS+7,_CIF_WORK
-       jnz     .Lio_work
-.Lio_restore:
-       lg      %r14,__LC_VDSO_PER_CPU
-       lmg     %r0,%r10,__PT_R0(%r11)
-       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
-       stpt    __LC_EXIT_TIMER
-       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-       lmg     %r11,%r15,__PT_R11(%r11)
-       lpswe   __LC_RETURN_PSW
-.Lio_done:
-
-#
-# There is work to do; find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and kvm is enabled, check if we need to
-#    modify the psw to leave SIE
-# 3) if we return to kernel code and preemptive scheduling is enabled, check
-#    the preemption counter and if it is zero call preempt_schedule_irq
-# Before any work can be done, a switch to the kernel stack is required.
-#
-.Lio_work:
-       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
-       jo      .Lio_work_user          # yes -> do resched & signal
-#ifdef CONFIG_PREEMPT
-       # check for preemptive scheduling
-       icm     %r0,15,__TI_precount(%r12)
-       jnz     .Lio_restore            # preemption is disabled
-       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
-       jno     .Lio_restore
-       # switch to kernel stack
-       lg      %r1,__PT_R15(%r11)
-       aghi    %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
-       la      %r11,STACK_FRAME_OVERHEAD(%r1)
-       lgr     %r15,%r1
-       # TRACE_IRQS_ON already done at .Lio_return, call
-       # TRACE_IRQS_OFF to keep things symmetrical
-       TRACE_IRQS_OFF
-       brasl   %r14,preempt_schedule_irq
-       j       .Lio_return
-#else
-       j       .Lio_restore
-#endif
-
-#
-# Need to do work before returning to userspace, switch to kernel stack
-#
-.Lio_work_user:
-       lg      %r1,__LC_KERNEL_STACK
-       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
-       la      %r11,STACK_FRAME_OVERHEAD(%r1)
-       lgr     %r15,%r1
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lio_work_tif:
-       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
-       jo      .Lio_mcck_pending
-       tm      __TI_flags+7(%r12),_TIF_NEED_RESCHED
-       jo      .Lio_reschedule
-       tm      __TI_flags+7(%r12),_TIF_SIGPENDING
-       jo      .Lio_sigpending
-       tm      __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
-       jo      .Lio_notify_resume
-       tm      __LC_CPU_FLAGS+7,_CIF_ASCE
-       jo      .Lio_uaccess
-       j       .Lio_return             # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
-       # TRACE_IRQS_ON already done at .Lio_return
-       brasl   %r14,s390_handle_mcck   # TIF bit will be cleared by handler
-       TRACE_IRQS_OFF
-       j       .Lio_return
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lio_uaccess:
-       ni      __LC_CPU_FLAGS+7,255-_CIF_ASCE
-       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
-       j       .Lio_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lio_reschedule:
-       # TRACE_IRQS_ON already done at .Lio_return
-       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
-       brasl   %r14,schedule           # call scheduler
-       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
-       TRACE_IRQS_OFF
-       j       .Lio_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lio_sigpending:
-       # TRACE_IRQS_ON already done at .Lio_return
-       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,do_signal
-       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
-       TRACE_IRQS_OFF
-       j       .Lio_return
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lio_notify_resume:
-       # TRACE_IRQS_ON already done at .Lio_return
-       ssm     __LC_SVC_NEW_PSW        # reenable interrupts
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,do_notify_resume
-       ssm     __LC_PGM_NEW_PSW        # disable I/O and ext. interrupts
-       TRACE_IRQS_OFF
-       j       .Lio_return
-
-/*
- * External interrupt handler routine
- */
-ENTRY(ext_int_handler)
-       STCK    __LC_INT_CLOCK
-       stpt    __LC_ASYNC_ENTER_TIMER
-       stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
-       lg      %r10,__LC_LAST_BREAK
-       lg      %r12,__LC_THREAD_INFO
-       larl    %r13,system_call
-       lmg     %r8,%r9,__LC_EXT_OLD_PSW
-       HANDLE_SIE_INTERCEPT %r14,3
-       SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-       tmhh    %r8,0x0001              # interrupting from user ?
-       jz      .Lext_skip
-       UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-       LAST_BREAK %r14
-.Lext_skip:
-       stmg    %r0,%r7,__PT_R0(%r11)
-       mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
-       stmg    %r8,%r9,__PT_PSW(%r11)
-       lghi    %r1,__LC_EXT_PARAMS2
-       mvc     __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
-       mvc     __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
-       mvc     __PT_INT_PARM_LONG(8,%r11),0(%r1)
-       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-       TRACE_IRQS_OFF
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       lghi    %r3,EXT_INTERRUPT
-       brasl   %r14,do_IRQ
-       j       .Lio_return
-
-/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
- */
-ENTRY(psw_idle)
-       stg     %r3,__SF_EMPTY(%r15)
-       larl    %r1,.Lpsw_idle_lpsw+4
-       stg     %r1,__SF_EMPTY+8(%r15)
-       STCK    __CLOCK_IDLE_ENTER(%r2)
-       stpt    __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
-       lpswe   __SF_EMPTY(%r15)
-       br      %r14
-.Lpsw_idle_end:
-
-.L__critical_end:
-
-/*
- * Machine check handler routines
- */
-ENTRY(mcck_int_handler)
-       STCK    __LC_MCCK_CLOCK
-       la      %r1,4095                # revalidate r1
-       spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # revalidate cpu timer
-       lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
-       lg      %r10,__LC_LAST_BREAK
-       lg      %r12,__LC_THREAD_INFO
-       larl    %r13,system_call
-       lmg     %r8,%r9,__LC_MCK_OLD_PSW
-       HANDLE_SIE_INTERCEPT %r14,4
-       tm      __LC_MCCK_CODE,0x80     # system damage?
-       jo      .Lmcck_panic            # yes -> rest of mcck code invalid
-       lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
-       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
-       tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
-       jo      3f
-       la      %r14,__LC_SYNC_ENTER_TIMER
-       clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
-       jl      0f
-       la      %r14,__LC_ASYNC_ENTER_TIMER
-0:     clc     0(8,%r14),__LC_EXIT_TIMER
-       jl      1f
-       la      %r14,__LC_EXIT_TIMER
-1:     clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
-       jl      2f
-       la      %r14,__LC_LAST_UPDATE_TIMER
-2:     spt     0(%r14)
-       mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3:     tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
-       jno     .Lmcck_panic            # no -> skip cleanup critical
-       SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
-       tm      %r8,0x0001              # interrupting from user ?
-       jz      .Lmcck_skip
-       UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
-       LAST_BREAK %r14
-.Lmcck_skip:
-       lghi    %r14,__LC_GPREGS_SAVE_AREA+64
-       stmg    %r0,%r7,__PT_R0(%r11)
-       mvc     __PT_R8(64,%r11),0(%r14)
-       stmg    %r8,%r9,__PT_PSW(%r11)
-       xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       brasl   %r14,s390_do_machine_check
-       tm      __PT_PSW+1(%r11),0x01   # returning to user ?
-       jno     .Lmcck_return
-       lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
-       mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
-       xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
-       la      %r11,STACK_FRAME_OVERHEAD(%r1)
-       lgr     %r15,%r1
-       ssm     __LC_PGM_NEW_PSW        # turn dat on, keep irqs off
-       tm      __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
-       jno     .Lmcck_return
-       TRACE_IRQS_OFF
-       brasl   %r14,s390_handle_mcck
-       TRACE_IRQS_ON
-.Lmcck_return:
-       lg      %r14,__LC_VDSO_PER_CPU
-       lmg     %r0,%r10,__PT_R0(%r11)
-       mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
-       tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
-       jno     0f
-       stpt    __LC_EXIT_TIMER
-       mvc     __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-0:     lmg     %r11,%r15,__PT_R11(%r11)
-       lpswe   __LC_RETURN_MCCK_PSW
-
-.Lmcck_panic:
-       lg      %r14,__LC_PANIC_STACK
-       slgr    %r14,%r15
-       srag    %r14,%r14,PAGE_SHIFT
-       jz      0f
-       lg      %r15,__LC_PANIC_STACK
-0:     aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       j       .Lmcck_skip
-
-#
-# PSW restart interrupt handler
-#
-ENTRY(restart_int_handler)
-       stg     %r15,__LC_SAVE_AREA_RESTART
-       lg      %r15,__LC_RESTART_STACK
-       aghi    %r15,-__PT_SIZE                 # create pt_regs on stack
-       xc      0(__PT_SIZE,%r15),0(%r15)
-       stmg    %r0,%r14,__PT_R0(%r15)
-       mvc     __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
-       mvc     __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
-       aghi    %r15,-STACK_FRAME_OVERHEAD      # create stack frame on stack
-       xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
-       lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
-       lg      %r2,__LC_RESTART_DATA
-       lg      %r3,__LC_RESTART_SOURCE
-       ltgr    %r3,%r3                         # test source cpu address
-       jm      1f                              # negative -> skip source stop
-0:     sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
-       brc     10,0b                           # wait for status stored
-1:     basr    %r14,%r1                        # call function
-       stap    __SF_EMPTY(%r15)                # store cpu address
-       llgh    %r3,__SF_EMPTY(%r15)
-2:     sigp    %r4,%r3,SIGP_STOP               # sigp stop to current cpu
-       brc     2,2b
-3:     j       3b
-
-       .section .kprobes.text, "ax"
-
-#ifdef CONFIG_CHECK_STACK
-/*
- * The synchronous or the asynchronous stack overflowed. We are dead.
- * No need to properly save the registers, we are going to panic anyway.
- * Set up a pt_regs so that show_trace can provide a good call trace.
- */
-stack_overflow:
-       lg      %r15,__LC_PANIC_STACK   # change to panic stack
-       la      %r11,STACK_FRAME_OVERHEAD(%r15)
-       stmg    %r0,%r7,__PT_R0(%r11)
-       stmg    %r8,%r9,__PT_PSW(%r11)
-       mvc     __PT_R8(64,%r11),0(%r14)
-       stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-       lgr     %r2,%r11                # pass pointer to pt_regs
-       jg      kernel_stack_overflow
-#endif
-
-       .align  8
-.Lcleanup_table:
-       .quad   system_call
-       .quad   .Lsysc_do_svc
-       .quad   .Lsysc_tif
-       .quad   .Lsysc_restore
-       .quad   .Lsysc_done
-       .quad   .Lio_tif
-       .quad   .Lio_restore
-       .quad   .Lio_done
-       .quad   psw_idle
-       .quad   .Lpsw_idle_end
-
-cleanup_critical:
-       clg     %r9,BASED(.Lcleanup_table)      # system_call
-       jl      0f
-       clg     %r9,BASED(.Lcleanup_table+8)    # .Lsysc_do_svc
-       jl      .Lcleanup_system_call
-       clg     %r9,BASED(.Lcleanup_table+16)   # .Lsysc_tif
-       jl      0f
-       clg     %r9,BASED(.Lcleanup_table+24)   # .Lsysc_restore
-       jl      .Lcleanup_sysc_tif
-       clg     %r9,BASED(.Lcleanup_table+32)   # .Lsysc_done
-       jl      .Lcleanup_sysc_restore
-       clg     %r9,BASED(.Lcleanup_table+40)   # .Lio_tif
-       jl      0f
-       clg     %r9,BASED(.Lcleanup_table+48)   # .Lio_restore
-       jl      .Lcleanup_io_tif
-       clg     %r9,BASED(.Lcleanup_table+56)   # .Lio_done
-       jl      .Lcleanup_io_restore
-       clg     %r9,BASED(.Lcleanup_table+64)   # psw_idle
-       jl      0f
-       clg     %r9,BASED(.Lcleanup_table+72)   # .Lpsw_idle_end
-       jl      .Lcleanup_idle
-0:     br      %r14
-
-
-.Lcleanup_system_call:
-       # check if stpt has been executed
-       clg     %r9,BASED(.Lcleanup_system_call_insn)
-       jh      0f
-       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
-       cghi    %r11,__LC_SAVE_AREA_ASYNC
-       je      0f
-       mvc     __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0:     # check if stmg has been executed
-       clg     %r9,BASED(.Lcleanup_system_call_insn+8)
-       jh      0f
-       mvc     __LC_SAVE_AREA_SYNC(64),0(%r11)
-0:     # check if base register setup + TIF bit load has been done
-       clg     %r9,BASED(.Lcleanup_system_call_insn+16)
-       jhe     0f
-       # set up saved registers r10 and r12
-       stg     %r10,16(%r11)           # r10 last break
-       stg     %r12,32(%r11)           # r12 thread-info pointer
-0:     # check if the user time update has been done
-       clg     %r9,BASED(.Lcleanup_system_call_insn+24)
-       jh      0f
-       lg      %r15,__LC_EXIT_TIMER
-       slg     %r15,__LC_SYNC_ENTER_TIMER
-       alg     %r15,__LC_USER_TIMER
-       stg     %r15,__LC_USER_TIMER
-0:     # check if the system time update has been done
-       clg     %r9,BASED(.Lcleanup_system_call_insn+32)
-       jh      0f
-       lg      %r15,__LC_LAST_UPDATE_TIMER
-       slg     %r15,__LC_EXIT_TIMER
-       alg     %r15,__LC_SYSTEM_TIMER
-       stg     %r15,__LC_SYSTEM_TIMER
-0:     # update accounting time stamp
-       mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-       # do LAST_BREAK
-       lg      %r9,16(%r11)
-       srag    %r9,%r9,23
-       jz      0f
-       mvc     __TI_last_break(8,%r12),16(%r11)
-0:     # set up saved register r11
-       lg      %r15,__LC_KERNEL_STACK
-       la      %r9,STACK_FRAME_OVERHEAD(%r15)
-       stg     %r9,24(%r11)            # r11 pt_regs pointer
-       # fill pt_regs
-       mvc     __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
-       stmg    %r0,%r7,__PT_R0(%r9)
-       mvc     __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
-       mvc     __PT_INT_CODE(4,%r9),__LC_SVC_ILC
-       xc      __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
-       mvi     __PT_FLAGS+7(%r9),_PIF_SYSCALL
-       # setup saved register r15
-       stg     %r15,56(%r11)           # r15 stack pointer
-       # set new psw address and exit
-       larl    %r9,.Lsysc_do_svc
-       br      %r14
-.Lcleanup_system_call_insn:
-       .quad   system_call
-       .quad   .Lsysc_stmg
-       .quad   .Lsysc_per
-       .quad   .Lsysc_vtime+18
-       .quad   .Lsysc_vtime+42
-
-.Lcleanup_sysc_tif:
-       larl    %r9,.Lsysc_tif
-       br      %r14
-
-.Lcleanup_sysc_restore:
-       clg     %r9,BASED(.Lcleanup_sysc_restore_insn)
-       je      0f
-       lg      %r9,24(%r11)            # get saved pointer to pt_regs
-       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
-       mvc     0(64,%r11),__PT_R8(%r9)
-       lmg     %r0,%r7,__PT_R0(%r9)
-0:     lmg     %r8,%r9,__LC_RETURN_PSW
-       br      %r14
-.Lcleanup_sysc_restore_insn:
-       .quad   .Lsysc_done - 4
-
-.Lcleanup_io_tif:
-       larl    %r9,.Lio_tif
-       br      %r14
-
-.Lcleanup_io_restore:
-       clg     %r9,BASED(.Lcleanup_io_restore_insn)
-       je      0f
-       lg      %r9,24(%r11)            # get saved r11 pointer to pt_regs
-       mvc     __LC_RETURN_PSW(16),__PT_PSW(%r9)
-       mvc     0(64,%r11),__PT_R8(%r9)
-       lmg     %r0,%r7,__PT_R0(%r9)
-0:     lmg     %r8,%r9,__LC_RETURN_PSW
-       br      %r14
-.Lcleanup_io_restore_insn:
-       .quad   .Lio_done - 4
-
-.Lcleanup_idle:
-       # copy interrupt clock & cpu timer
-       mvc     __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-       mvc     __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
-       cghi    %r11,__LC_SAVE_AREA_ASYNC
-       je      0f
-       mvc     __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-       mvc     __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0:     # check if stck & stpt have been executed
-       clg     %r9,BASED(.Lcleanup_idle_insn)
-       jhe     1f
-       mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
-       mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:     # account system time going idle
-       lg      %r9,__LC_STEAL_TIMER
-       alg     %r9,__CLOCK_IDLE_ENTER(%r2)
-       slg     %r9,__LC_LAST_UPDATE_CLOCK
-       stg     %r9,__LC_STEAL_TIMER
-       mvc     __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
-       lg      %r9,__LC_SYSTEM_TIMER
-       alg     %r9,__LC_LAST_UPDATE_TIMER
-       slg     %r9,__TIMER_IDLE_ENTER(%r2)
-       stg     %r9,__LC_SYSTEM_TIMER
-       mvc     __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
-       # prepare return psw
-       nihh    %r8,0xfcfd              # clear irq & wait state bits
-       lg      %r9,48(%r11)            # return from psw_idle
-       br      %r14
-.Lcleanup_idle_insn:
-       .quad   .Lpsw_idle_lpsw
-
-/*
- * Integer constants
- */
-       .align  8
-.Lcritical_start:
-       .quad   .L__critical_start
-.Lcritical_length:
-       .quad   .L__critical_end - .L__critical_start
-
-
-#if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
-       stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
-       stg     %r2,__SF_EMPTY(%r15)            # save control block pointer
-       stg     %r3,__SF_EMPTY+8(%r15)          # save guest register save area
-       xc      __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
-       lmg     %r0,%r13,0(%r3)                 # load guest gprs 0-13
-       lg      %r14,__LC_GMAP                  # get gmap pointer
-       ltgr    %r14,%r14
-       jz      .Lsie_gmap
-       lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
-.Lsie_gmap:
-       lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
-       oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
-       tm      __SIE_PROG20+3(%r14),1          # last exit...
-       jnz     .Lsie_done
-       LPP     __SF_EMPTY(%r15)                # set guest id
-       sie     0(%r14)
-.Lsie_done:
-       LPP     __SF_EMPTY+16(%r15)             # set host id
-       ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
-       lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
-       nop     0
-       .globl sie_exit
-sie_exit:
-       lg      %r14,__SF_EMPTY+8(%r15)         # load guest register save area
-       stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
-       lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
-       lg      %r2,__SF_EMPTY+24(%r15)         # return exit reason code
-       br      %r14
-.Lsie_fault:
-       lghi    %r14,-EFAULT
-       stg     %r14,__SF_EMPTY+24(%r15)        # set exit reason code
-       j       sie_exit
-
-       .align  8
-.Lsie_critical:
-       .quad   .Lsie_gmap
-.Lsie_critical_length:
-       .quad   .Lsie_done - .Lsie_gmap
-
-       EX_TABLE(.Lrewind_pad,.Lsie_fault)
-       EX_TABLE(sie_exit,.Lsie_fault)
-#endif
-
-               .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esame
-       .globl  sys_call_table
-sys_call_table:
-#include "syscalls.S"
-#undef SYSCALL
-
-#ifdef CONFIG_COMPAT
-
-#define SYSCALL(esa,esame,emu) .long emu
-       .globl  sys_call_table_emu
-sys_call_table_emu:
-#include "syscalls.S"
-#undef SYSCALL
-#endif
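
The SYSCALL() macro at the end of this hunk generates both dispatch tables from the single list in syscalls.S. As an illustration (the entry shown is assumed, not taken from this patch), a line of the form

	SYSCALL(sys_read,sys_read,compat_sys_read)

expands to ".long sys_read" while sys_call_table is assembled and to ".long compat_sys_read" while sys_call_table_emu is assembled. Each slot is a 4-byte .long, which is why the dispatcher above scales the system call number by four (slag %r8,%r1,2 / sllg %r8,%r2,2) before loading the handler address with lgf.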
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
new file mode 100644 (file)
index 0000000..52aab0b
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ *    Copyright IBM Corp 2000, 2011
+ *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *              Denis Joseph Barrow,
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/sigp.h>
+
+#
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
+#
+ENTRY(store_status)
+       /* Save register one and load save area base */
+       stg     %r1,__LC_SAVE_AREA_RESTART
+       lghi    %r1,SAVE_AREA_BASE
+       /* General purpose registers */
+       stmg    %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       lg      %r2,__LC_SAVE_AREA_RESTART
+       stg     %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+       /* Control registers */
+       stctg   %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       /* Access registers */
+       stam    %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       /* Floating point registers */
+       std     %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       std     %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       /* Floating point control register */
+       stfpc   __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       /* CPU timer */
+       stpt    __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+       /* Saved prefix register */
+       larl    %r2,dump_prefix_page
+       mvc     __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+       /* Clock comparator - seven bytes */
+       larl    %r2,.Lclkcmp
+       stckc   0(%r2)
+       mvc     __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+       /* Program status word */
+       epsw    %r2,%r3
+       st      %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+       st      %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+       larl    %r2,store_status
+       stg     %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+       br      %r14
+
+       .section .bss
+       .align  8
+.Lclkcmp:      .quad   0x0000000000000000
+       .previous
+
+#
+# do_reipl_asm
+# Parameter: r2 = schid of reipl device
+#
+
+ENTRY(do_reipl_asm)
+               basr    %r13,0
+.Lpg0:         lpswe   .Lnewpsw-.Lpg0(%r13)
+.Lpg1:         brasl   %r14,store_status
+
+               lctlg   %c6,%c6,.Lall-.Lpg0(%r13)
+               lgr     %r1,%r2
+               mvc     __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
+               stsch   .Lschib-.Lpg0(%r13)
+               oi      .Lschib+5-.Lpg0(%r13),0x84
+.Lecs:         xi      .Lschib+27-.Lpg0(%r13),0x01
+               msch    .Lschib-.Lpg0(%r13)
+               lghi    %r0,5
+.Lssch:                ssch    .Liplorb-.Lpg0(%r13)
+               jz      .L001
+               brct    %r0,.Lssch
+               bas     %r14,.Ldisab-.Lpg0(%r13)
+.L001:         mvc     __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi:         lpswe   .Lwaitpsw-.Lpg0(%r13)
+.Lcont:                c       %r1,__LC_SUBCHANNEL_ID
+               jnz     .Ltpi
+               clc     __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
+               jnz     .Ltpi
+               tsch    .Liplirb-.Lpg0(%r13)
+               tm      .Liplirb+9-.Lpg0(%r13),0xbf
+               jz      .L002
+               bas     %r14,.Ldisab-.Lpg0(%r13)
+.L002:         tm      .Liplirb+8-.Lpg0(%r13),0xf3
+               jz      .L003
+               bas     %r14,.Ldisab-.Lpg0(%r13)
+.L003:         st      %r1,__LC_SUBCHANNEL_ID
+               lhi     %r1,0            # mode 0 = esa
+               slr     %r0,%r0          # set cpuid to zero
+               sigp    %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
+               lpsw    0
+.Ldisab:       sll     %r14,1
+               srl     %r14,1           # need to kill hi bit to avoid specification exceptions.
+               st      %r14,.Ldispsw+12-.Lpg0(%r13)
+               lpswe   .Ldispsw-.Lpg0(%r13)
+               .align  8
+.Lall:         .quad   0x00000000ff000000
+               .align  16
+/*
+ * These addresses have to be 31 bit, otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode, as bit 31 would be set
+ * in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31-bit lpswe instruction, a fact that appears to have
+ * been omitted from the PoP.
+ */
+.Lnewpsw:      .quad   0x0000000080000000
+               .quad   .Lpg1
+.Lpcnew:       .quad   0x0000000080000000
+               .quad   .Lecs
+.Lionew:       .quad   0x0000000080000000
+               .quad   .Lcont
+.Lwaitpsw:     .quad   0x0202000080000000
+               .quad   .Ltpi
+.Ldispsw:      .quad   0x0002000080000000
+               .quad   0x0000000000000000
+.Liplccws:     .long   0x02000000,0x60000018
+               .long   0x08000008,0x20000001
+.Liplorb:      .long   0x0049504c,0x0040ff80
+               .long   0x00000000+.Liplccws
+.Lschib:       .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+.Liplirb:      .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
+               .long   0x00000000,0x00000000
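
A note on the addressing idiom used throughout store_status above: several of the store instructions involved (std, stam, stfpc, stpt, mvc) only have a 12-bit displacement field, so the absolute lowcore save-area offsets cannot always be encoded directly. The code therefore loads %r1 with SAVE_AREA_BASE and writes every operand as offset-SAVE_AREA_BASE(%r1). As a worked example with assumed constants (SAVE_AREA_BASE = __LC_FPREGS_SAVE_AREA = 0x1200, __LC_GPREGS_SAVE_AREA = 0x1280; neither value appears in this patch), the operand __LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) encodes only 0x1280 - 0x1200 = 0x80 as its displacement, yet with %r1 = 0x1200 the effective address is still the absolute save area at 0x1280.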
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
deleted file mode 100644 (file)
index dc3b127..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *    Copyright IBM Corp 2000, 2011
- *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- *              Denis Joseph Barrow,
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/sigp.h>
-
-#
-# store_status
-#
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
-#
-ENTRY(store_status)
-       /* Save register one and load save area base */
-       stg     %r1,__LC_SAVE_AREA_RESTART
-       lghi    %r1,SAVE_AREA_BASE
-       /* General purpose registers */
-       stmg    %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       lg      %r2,__LC_SAVE_AREA_RESTART
-       stg     %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
-       /* Control registers */
-       stctg   %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       /* Access registers */
-       stam    %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       /* Floating point registers */
-       std     %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       std     %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       /* Floating point control register */
-       stfpc   __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       /* CPU timer */
-       stpt    __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
-       /* Saved prefix register */
-       larl    %r2,dump_prefix_page
-       mvc     __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
-       /* Clock comparator - seven bytes */
-       larl    %r2,.Lclkcmp
-       stckc   0(%r2)
-       mvc     __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
-       /* Program status word */
-       epsw    %r2,%r3
-       st      %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
-       st      %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
-       larl    %r2,store_status
-       stg     %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
-       br      %r14
-
-       .section .bss
-       .align  8
-.Lclkcmp:      .quad   0x0000000000000000
-       .previous
-
-#
-# do_reipl_asm
-# Parameter: r2 = schid of reipl device
-#
-
-ENTRY(do_reipl_asm)
-               basr    %r13,0
-.Lpg0:         lpswe   .Lnewpsw-.Lpg0(%r13)
-.Lpg1:         brasl   %r14,store_status
-
-               lctlg   %c6,%c6,.Lall-.Lpg0(%r13)
-               lgr     %r1,%r2
-               mvc     __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
-               stsch   .Lschib-.Lpg0(%r13)
-               oi      .Lschib+5-.Lpg0(%r13),0x84
-.Lecs:         xi      .Lschib+27-.Lpg0(%r13),0x01
-               msch    .Lschib-.Lpg0(%r13)
-               lghi    %r0,5
-.Lssch:                ssch    .Liplorb-.Lpg0(%r13)
-               jz      .L001
-               brct    %r0,.Lssch
-               bas     %r14,.Ldisab-.Lpg0(%r13)
-.L001:         mvc     __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi:         lpswe   .Lwaitpsw-.Lpg0(%r13)
-.Lcont:                c       %r1,__LC_SUBCHANNEL_ID
-               jnz     .Ltpi
-               clc     __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
-               jnz     .Ltpi
-               tsch    .Liplirb-.Lpg0(%r13)
-               tm      .Liplirb+9-.Lpg0(%r13),0xbf
-               jz      .L002
-               bas     %r14,.Ldisab-.Lpg0(%r13)
-.L002:         tm      .Liplirb+8-.Lpg0(%r13),0xf3
-               jz      .L003
-               bas     %r14,.Ldisab-.Lpg0(%r13)
-.L003:         st      %r1,__LC_SUBCHANNEL_ID
-               lhi     %r1,0            # mode 0 = esa
-               slr     %r0,%r0          # set cpuid to zero
-               sigp    %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
-               lpsw    0
-.Ldisab:       sll     %r14,1
-               srl     %r14,1           # need to kill hi bit to avoid specification exceptions.
-               st      %r14,.Ldispsw+12-.Lpg0(%r13)
-               lpswe   .Ldispsw-.Lpg0(%r13)
-               .align  8
-.Lall:         .quad   0x00000000ff000000
-               .align  16
-/*
- * These addresses have to be 31 bit, otherwise
- * the sigp will throw a specification exception
- * when switching to ESA mode, as bit 31 would be set
- * in the ESA psw.
- * Bit 31 of the addresses has to be 0 for the
- * 31-bit lpswe instruction, a fact that appears to have
- * been omitted from the PoP.
- */
-.Lnewpsw:      .quad   0x0000000080000000
-               .quad   .Lpg1
-.Lpcnew:       .quad   0x0000000080000000
-               .quad   .Lecs
-.Lionew:       .quad   0x0000000080000000
-               .quad   .Lcont
-.Lwaitpsw:     .quad   0x0202000080000000
-               .quad   .Ltpi
-.Ldispsw:      .quad   0x0002000080000000
-               .quad   0x0000000000000000
-.Liplccws:     .long   0x02000000,0x60000018
-               .long   0x08000008,0x20000001
-.Liplorb:      .long   0x0049504c,0x0040ff80
-               .long   0x00000000+.Liplccws
-.Lschib:       .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-.Liplirb:      .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
-               .long   0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
new file mode 100644 (file)
index 0000000..cfac283
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright IBM Corp. 2005
+ *
+ * Author(s): Rolf Adelsberger,
+ *           Heiko Carstens <heiko.carstens@de.ibm.com>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/sigp.h>
+
+/*
+ * moves the new kernel to its destination...
+ * %r2 = pointer to first kimage_entry_t
+ * %r3 = start address - where to jump to after the job is done...
+ *
+ * %r5 will be used as temp. storage
+ * %r6 holds the destination address
+ * %r7 = PAGE_SIZE
+ * %r8 holds the source address
+ * %r9 = PAGE_SIZE
+ *
+ * 0xf000 is a page_mask
+ */
+
+       .text
+ENTRY(relocate_kernel)
+               basr    %r13,0          # base address
+       .base:
+               stnsm   sys_msk-.base(%r13),0xfb        # disable DAT
+               stctg   %c0,%c15,ctlregs-.base(%r13)
+               stmg    %r0,%r15,gprregs-.base(%r13)
+               lghi    %r0,3
+               sllg    %r0,%r0,31
+               stg     %r0,0x1d0(%r0)
+               la      %r0,.back_pgm-.base(%r13)
+               stg     %r0,0x1d8(%r0)
+               la      %r1,load_psw-.base(%r13)
+               mvc     0(8,%r0),0(%r1)
+               la      %r0,.back-.base(%r13)
+               st      %r0,4(%r0)
+               oi      4(%r0),0x80
+               lghi    %r0,0
+               diag    %r0,%r0,0x308
+       .back:
+               lhi     %r1,1           # mode 1 = esame
+               sigp    %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
+               sam64                   # switch to 64 bit addressing mode
+               basr    %r13,0
+       .back_base:
+               oi      have_diag308-.back_base(%r13),0x01
+               lctlg   %c0,%c15,ctlregs-.back_base(%r13)
+               lmg     %r0,%r15,gprregs-.back_base(%r13)
+               j       .top
+       .back_pgm:
+               lmg     %r0,%r15,gprregs-.base(%r13)
+       .top:
+               lghi    %r7,4096        # load PAGE_SIZE in r7
+               lghi    %r9,4096        # load PAGE_SIZE in r9
+               lg      %r5,0(%r2)      # read another word for indirection page
+               aghi    %r2,8           # increment pointer
+               tml     %r5,0x1         # is it a destination page?
+               je      .indir_check    # NO, goto "indir_check"
+               lgr     %r6,%r5         # r6 = r5
+               nill    %r6,0xf000      # mask it out and...
+               j       .top            # ...next iteration
+       .indir_check:
+               tml     %r5,0x2         # is it an indirection page?
+               je      .done_test      # NO, goto "done_test"
+               nill    %r5,0xf000      # YES, mask out,
+               lgr     %r2,%r5         # move it into the right register,
+               j       .top            # and read next...
+       .done_test:
+               tml     %r5,0x4         # is it the done indicator?
+               je      .source_test    # NO! Well, then it should be the source indicator...
+               j       .done           # ok, let's finish it here...
+       .source_test:
+               tml     %r5,0x8         # it should be a source indicator...
+               je      .top            # NO, ignore it...
+               lgr     %r8,%r5         # r8 = r5
+               nill    %r8,0xf000      # masking
+       0:      mvcle   %r6,%r8,0x0     # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
+               jo      0b
+               j       .top
+       .done:
+               sgr     %r0,%r0         # clear register r0
+               la      %r4,load_psw-.base(%r13)        # load psw-address into the register
+               o       %r3,4(%r4)      # or load address into psw
+               st      %r3,4(%r4)
+               mvc     0(8,%r0),0(%r4) # copy psw to absolute address 0
+               tm      have_diag308-.base(%r13),0x01
+               jno     .no_diag308
+               diag    %r0,%r0,0x308
+       .no_diag308:
+               sam31                   # 31 bit mode
+               sr      %r1,%r1         # erase register r1
+               sr      %r2,%r2         # erase register r2
+               sigp    %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
+               lpsw    0               # hopefully start new kernel...
+
+               .align  8
+       load_psw:
+               .long   0x00080000,0x80000000
+       sys_msk:
+               .quad   0
+       ctlregs:
+               .rept   16
+               .quad   0
+               .endr
+       gprregs:
+               .rept   16
+               .quad   0
+               .endr
+       have_diag308:
+               .byte   0
+               .align  8
+       relocate_kernel_end:
+       .align 8
+       .globl  relocate_kernel_len
+       relocate_kernel_len:
+               .quad   relocate_kernel_end - relocate_kernel
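
For context, a rough C-side sketch of how this mover is typically driven by the kexec path; everything below (function name, declarations, exact call site) is an assumption for illustration and is not part of this patch -- the real wiring lives in arch/s390/kernel/machine_kexec.c. The idea is simply: copy the position-independent blob (relocate_kernel .. relocate_kernel_end) to the kexec control page, then call it with %r2 pointing at the kimage entry list and %r3 holding the new kernel's start address, matching the header comment above.

	#include <linux/kexec.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* entry point and length symbol provided by relocate_kernel.S */
	extern const unsigned char relocate_kernel[];
	extern const unsigned long relocate_kernel_len;

	typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);

	/* illustrative helper (hypothetical name) */
	static void start_new_kernel(struct kimage *image)
	{
		unsigned long buf;

		/* physical address of the kexec control page */
		buf = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
		/* copy the mover there ... */
		memcpy((void *) buf, relocate_kernel, relocate_kernel_len);
		/* ... and enter it: %r2 = &image->head, %r3 = image->start */
		((relocate_kernel_t) buf)(&image->head, image->start);
	}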
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
deleted file mode 100644 (file)
index cfac283..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright IBM Corp. 2005
- *
- * Author(s): Rolf Adelsberger,
- *           Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/sigp.h>
-
-/*
- * moves the new kernel to its destination...
- * %r2 = pointer to first kimage_entry_t
- * %r3 = start address - where to jump to after the job is done...
- *
- * %r5 will be used as temp. storage
- * %r6 holds the destination address
- * %r7 = PAGE_SIZE
- * %r8 holds the source address
- * %r9 = PAGE_SIZE
- *
- * 0xf000 is a page_mask
- */
-
-       .text
-ENTRY(relocate_kernel)
-               basr    %r13,0          # base address
-       .base:
-               stnsm   sys_msk-.base(%r13),0xfb        # disable DAT
-               stctg   %c0,%c15,ctlregs-.base(%r13)
-               stmg    %r0,%r15,gprregs-.base(%r13)
-               lghi    %r0,3
-               sllg    %r0,%r0,31
-               stg     %r0,0x1d0(%r0)
-               la      %r0,.back_pgm-.base(%r13)
-               stg     %r0,0x1d8(%r0)
-               la      %r1,load_psw-.base(%r13)
-               mvc     0(8,%r0),0(%r1)
-               la      %r0,.back-.base(%r13)
-               st      %r0,4(%r0)
-               oi      4(%r0),0x80
-               lghi    %r0,0
-               diag    %r0,%r0,0x308
-       .back:
-               lhi     %r1,1           # mode 1 = esame
-               sigp    %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
-               sam64                   # switch to 64 bit addressing mode
-               basr    %r13,0
-       .back_base:
-               oi      have_diag308-.back_base(%r13),0x01
-               lctlg   %c0,%c15,ctlregs-.back_base(%r13)
-               lmg     %r0,%r15,gprregs-.back_base(%r13)
-               j       .top
-       .back_pgm:
-               lmg     %r0,%r15,gprregs-.base(%r13)
-       .top:
-               lghi    %r7,4096        # load PAGE_SIZE in r7
-               lghi    %r9,4096        # load PAGE_SIZE in r9
-               lg      %r5,0(%r2)      # read another word for indirection page
-               aghi    %r2,8           # increment pointer
-               tml     %r5,0x1         # is it a destination page?
-               je      .indir_check    # NO, goto "indir_check"
-               lgr     %r6,%r5         # r6 = r5
-               nill    %r6,0xf000      # mask it out and...
-               j       .top            # ...next iteration
-       .indir_check:
-               tml     %r5,0x2         # is it an indirection page?
-               je      .done_test      # NO, goto "done_test"
-               nill    %r5,0xf000      # YES, mask out,
-               lgr     %r2,%r5         # move it into the right register,
-               j       .top            # and read next...
-       .done_test:
-               tml     %r5,0x4         # is it the done indicator?
-               je      .source_test    # NO! Well, then it should be the source indicator...
-               j       .done           # ok, let's finish it here...
-       .source_test:
-               tml     %r5,0x8         # it should be a source indicator...
-               je      .top            # NO, ignore it...
-               lgr     %r8,%r5         # r8 = r5
-               nill    %r8,0xf000      # masking
-       0:      mvcle   %r6,%r8,0x0     # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
-               jo      0b
-               j       .top
-       .done:
-               sgr     %r0,%r0         # clear register r0
-               la      %r4,load_psw-.base(%r13)        # load psw-address into the register
-               o       %r3,4(%r4)      # or load address into psw
-               st      %r3,4(%r4)
-               mvc     0(8,%r0),0(%r4) # copy psw to absolute address 0
-               tm      have_diag308-.base(%r13),0x01
-               jno     .no_diag308
-               diag    %r0,%r0,0x308
-       .no_diag308:
-               sam31                   # 31 bit mode
-               sr      %r1,%r1         # erase register r1
-               sr      %r2,%r2         # erase register r2
-               sigp    %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
-               lpsw    0               # hopefully start new kernel...
-
-               .align  8
-       load_psw:
-               .long   0x00080000,0x80000000
-       sys_msk:
-               .quad   0
-       ctlregs:
-               .rept   16
-               .quad   0
-               .endr
-       gprregs:
-               .rept   16
-               .quad   0
-               .endr
-       have_diag308:
-               .byte   0
-               .align  8
-       relocate_kernel_end:
-       .align 8
-       .globl  relocate_kernel_len
-       relocate_kernel_len:
-               .quad   relocate_kernel_end - relocate_kernel