All the bad stuff from the SSE version is implicitly inherited :)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44794 91177308-0d34-0410-b5e6-96231b3b80d8
".align 8\n"
".globl " ASMPREFIX "X86CompilationCallback\n"
ASMPREFIX "X86CompilationCallback:\n"
".align 8\n"
".globl " ASMPREFIX "X86CompilationCallback\n"
ASMPREFIX "X86CompilationCallback:\n"
// Save RBP
"pushq %rbp\n"
// Save RBP
"pushq %rbp\n"
+ ".cfi_def_cfa_offset 16\n"
+ ".cfi_offset %rbp, -16\n"
// Save RSP
"movq %rsp, %rbp\n"
+ ".cfi_def_cfa_register %rbp\n"
// Save all int arg registers
"pushq %rdi\n"
+ ".cfi_rel_offset %rdi, 0\n"
+ ".cfi_rel_offset %rsi, 8\n"
+ ".cfi_rel_offset %rdx, 16\n"
+ ".cfi_rel_offset %rcx, 24\n"
+ ".cfi_rel_offset %r8, 32\n"
+ ".cfi_rel_offset %r9, 40\n"
// Align stack on 16-byte boundary. RSP might not be properly aligned
// (8 byte) if this is called from an indirect stub.
"andq $-16, %rsp\n"
"movaps (%rsp), %xmm0\n"
// Restore RSP
"movq %rbp, %rsp\n"
"movaps (%rsp), %xmm0\n"
// Restore RSP
"movq %rbp, %rsp\n"
+ ".cfi_def_cfa_register esp\n"
// Restore all int arg registers
"subq $48, %rsp\n"
+ ".cfi_adjust_cfa_offset 48\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %r9\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %r8\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %rcx\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %rdx\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %rsi\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %rdi\n"
// Restore RBP
"popq %rbp\n"
+ ".cfi_adjust_cfa_offset -8\n"
+ ".cfi_restore %rbp\n"
+ "ret\n"
+ ".cfi_endproc\n");
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
#ifndef _MSC_VER
void X86CompilationCallback(void);
".cfi_startproc\n"
"pushl %ebp\n"
".cfi_def_cfa_offset 8\n"
".cfi_startproc\n"
"pushl %ebp\n"
".cfi_def_cfa_offset 8\n"
- ".cfi_offset ebp, -8\n"
+ ".cfi_offset %ebp, -8\n"
"movl %esp, %ebp\n" // Standard prologue
"movl %esp, %ebp\n" // Standard prologue
- ".cfi_def_cfa_register ebp\n"
+ ".cfi_def_cfa_register %ebp\n"
- ".cfi_rel_offset eax, 0\n"
+ ".cfi_rel_offset %eax, 0\n"
"pushl %edx\n" // Save EAX/EDX/ECX
"pushl %edx\n" // Save EAX/EDX/ECX
- ".cfi_rel_offset edx, 4\n"
+ ".cfi_rel_offset %edx, 4\n"
- ".cfi_rel_offset ecx, 8\n"
+ ".cfi_rel_offset %ecx, 8\n"
#if defined(__APPLE__)
"andl $-16, %esp\n" // Align ESP on 16-byte boundary
#endif
"movl %ebp, (%esp)\n"
"call " ASMPREFIX "X86CompilationCallback2\n"
"movl %ebp, %esp\n" // Restore ESP
"movl %ebp, (%esp)\n"
"call " ASMPREFIX "X86CompilationCallback2\n"
"movl %ebp, %esp\n" // Restore ESP
- ".cfi_def_cfa_register esp\n"
+ ".cfi_def_cfa_register %esp\n"
"subl $12, %esp\n"
".cfi_adjust_cfa_offset 12\n"
"popl %ecx\n"
".cfi_adjust_cfa_offset -4\n"
"subl $12, %esp\n"
".cfi_adjust_cfa_offset 12\n"
"popl %ecx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %edx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %edx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %eax\n"
".cfi_adjust_cfa_offset -4\n"
"popl %eax\n"
".cfi_adjust_cfa_offset -4\n"
"popl %ebp\n"
".cfi_adjust_cfa_offset -4\n"
"popl %ebp\n"
".cfi_adjust_cfa_offset -4\n"
"ret\n"
".cfi_endproc\n");
"ret\n"
".cfi_endproc\n");
".cfi_startproc\n"
"pushl %ebp\n"
".cfi_def_cfa_offset 8\n"
".cfi_startproc\n"
"pushl %ebp\n"
".cfi_def_cfa_offset 8\n"
- ".cfi_offset ebp, -8\n"
+ ".cfi_offset %ebp, -8\n"
"movl %esp, %ebp\n" // Standard prologue
"movl %esp, %ebp\n" // Standard prologue
- ".cfi_def_cfa_register ebp\n"
+ ".cfi_def_cfa_register %ebp\n"
- ".cfi_rel_offset eax, 0\n"
+ ".cfi_rel_offset %eax, 0\n"
"pushl %edx\n" // Save EAX/EDX/ECX
"pushl %edx\n" // Save EAX/EDX/ECX
- ".cfi_rel_offset edx, 4\n"
+ ".cfi_rel_offset %edx, 4\n"
- ".cfi_rel_offset ecx, 8\n"
+ ".cfi_rel_offset %ecx, 8\n"
"andl $-16, %esp\n" // Align ESP on 16-byte boundary
// Save all XMM arg registers
"subl $64, %esp\n"
"andl $-16, %esp\n" // Align ESP on 16-byte boundary
// Save all XMM arg registers
"subl $64, %esp\n"
"call " ASMPREFIX "X86CompilationCallback2\n"
"addl $16, %esp\n"
"movaps 48(%esp), %xmm3\n"
"call " ASMPREFIX "X86CompilationCallback2\n"
"addl $16, %esp\n"
"movaps 48(%esp), %xmm3\n"
"movaps 32(%esp), %xmm2\n"
"movaps 32(%esp), %xmm2\n"
"movaps 16(%esp), %xmm1\n"
"movaps 16(%esp), %xmm1\n"
"movl %ebp, %esp\n" // Restore ESP
".cfi_def_cfa_register esp\n"
"subl $12, %esp\n"
".cfi_adjust_cfa_offset 12\n"
"popl %ecx\n"
".cfi_adjust_cfa_offset -4\n"
"movl %ebp, %esp\n" // Restore ESP
".cfi_def_cfa_register esp\n"
"subl $12, %esp\n"
".cfi_adjust_cfa_offset 12\n"
"popl %ecx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %edx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %edx\n"
".cfi_adjust_cfa_offset -4\n"
"popl %eax\n"
".cfi_adjust_cfa_offset -4\n"
"popl %eax\n"
".cfi_adjust_cfa_offset -4\n"
"popl %ebp\n"
".cfi_adjust_cfa_offset -4\n"
"popl %ebp\n"
".cfi_adjust_cfa_offset -4\n"
"ret\n"
".cfi_endproc\n");
#else
"ret\n"
".cfi_endproc\n");
#else