1 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-linux | FileCheck %s --check-prefix=LINUX
2 ; RUN: llc < %s -enable-tail-merge=0 -mtriple=x86_64-windows | FileCheck %s --check-prefix=WINDOWS
4 ; Test that we actually spill and reload all arguments in the variadic argument
5 ; pack. Doing a normal call will clobber all argument registers, and we will
6 ; spill around it. A simple adjustment should not require any XMM spills.
8 declare void @llvm.va_start(i8*) nounwind
10 declare void(i8*, ...)* @get_f(i8* %this)
define void @f_thunk(i8* %this, ...) {
  ; Use va_start so that we exercise the combination.
  %ap = alloca [4 x i8*], align 16
  %ap_i8 = bitcast [4 x i8*]* %ap to i8*
  call void @llvm.va_start(i8* %ap_i8)

  ; The normal call to @get_f clobbers all argument registers, forcing the
  ; vararg state (6 GPRs, 8 XMMs, AL) to be spilled and reloaded around it.
  %fptr = call void(i8*, ...)*(i8*)* @get_f(i8* %this)
  ; A musttail call must be immediately followed by ret.
  musttail call void (i8*, ...)* %fptr(i8* %this, ...)
  ret void
}
23 ; Save and restore 6 GPRs, 8 XMMs, and AL around the call.
25 ; LINUX-LABEL: f_thunk:
26 ; LINUX-DAG: movq %rdi, {{.*}}
27 ; LINUX-DAG: movq %rsi, {{.*}}
28 ; LINUX-DAG: movq %rdx, {{.*}}
29 ; LINUX-DAG: movq %rcx, {{.*}}
30 ; LINUX-DAG: movq %r8, {{.*}}
31 ; LINUX-DAG: movq %r9, {{.*}}
32 ; LINUX-DAG: movb %al, {{.*}}
33 ; LINUX-DAG: movaps %xmm0, {{[0-9]*}}(%rsp)
34 ; LINUX-DAG: movaps %xmm1, {{[0-9]*}}(%rsp)
35 ; LINUX-DAG: movaps %xmm2, {{[0-9]*}}(%rsp)
36 ; LINUX-DAG: movaps %xmm3, {{[0-9]*}}(%rsp)
37 ; LINUX-DAG: movaps %xmm4, {{[0-9]*}}(%rsp)
38 ; LINUX-DAG: movaps %xmm5, {{[0-9]*}}(%rsp)
39 ; LINUX-DAG: movaps %xmm6, {{[0-9]*}}(%rsp)
40 ; LINUX-DAG: movaps %xmm7, {{[0-9]*}}(%rsp)
42 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm0
43 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm1
44 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm2
45 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm3
46 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm4
47 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm5
48 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm6
49 ; LINUX-DAG: movaps {{[0-9]*}}(%rsp), %xmm7
50 ; LINUX-DAG: movq {{.*}}, %rdi
51 ; LINUX-DAG: movq {{.*}}, %rsi
52 ; LINUX-DAG: movq {{.*}}, %rdx
53 ; LINUX-DAG: movq {{.*}}, %rcx
54 ; LINUX-DAG: movq {{.*}}, %r8
55 ; LINUX-DAG: movq {{.*}}, %r9
56 ; LINUX-DAG: movb {{.*}}, %al
57 ; LINUX: jmpq *{{.*}} # TAILCALL
59 ; WINDOWS-LABEL: f_thunk:
60 ; WINDOWS-NOT: mov{{.}}ps
61 ; WINDOWS-DAG: movq %rdx, {{.*}}
62 ; WINDOWS-DAG: movq %rcx, {{.*}}
63 ; WINDOWS-DAG: movq %r8, {{.*}}
64 ; WINDOWS-DAG: movq %r9, {{.*}}
65 ; WINDOWS-NOT: mov{{.}}ps
66 ; WINDOWS: callq get_f
67 ; WINDOWS-NOT: mov{{.}}ps
68 ; WINDOWS-DAG: movq {{.*}}, %rdx
69 ; WINDOWS-DAG: movq {{.*}}, %rcx
70 ; WINDOWS-DAG: movq {{.*}}, %r8
71 ; WINDOWS-DAG: movq {{.*}}, %r9
72 ; WINDOWS-NOT: mov{{.}}ps
73 ; WINDOWS: jmpq *{{.*}} # TAILCALL
75 ; This thunk shouldn't require any spills and reloads, assuming the register
76 ; allocator knows what it's doing.
define void @g_thunk(i8* %fptr_i8, ...) {
  ; Forward directly to the function pointer passed as the first argument;
  ; no intervening call, so no spills/reloads should be generated.
  %fptr = bitcast i8* %fptr_i8 to void (i8*, ...)*
  ; A musttail call must be immediately followed by ret.
  musttail call void (i8*, ...)* %fptr(i8* %fptr_i8, ...)
  ret void
}
84 ; LINUX-LABEL: g_thunk:
86 ; LINUX: jmpq *%rdi # TAILCALL
88 ; WINDOWS-LABEL: g_thunk:
90 ; WINDOWS: jmpq *%rcx # TAILCALL
92 ; Do a simple multi-exit multi-bb test.
94 %struct.Foo = type { i1, i8*, i8* }
96 @g = external global i32
define void @h_thunk(%struct.Foo* %this, ...) {
  ; Multi-exit, multi-BB thunk: branch on a flag loaded from %this and
  ; musttail-forward through one of two function pointers in the struct.
  %cond_p = getelementptr %struct.Foo* %this, i32 0, i32 0
  %cond = load i1* %cond_p
  br i1 %cond, label %then, label %else

then:
  %a_p = getelementptr %struct.Foo* %this, i32 0, i32 1
  %a_i8 = load i8** %a_p
  %a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
  ; A musttail call must be immediately followed by ret.
  musttail call void (%struct.Foo*, ...)* %a(%struct.Foo* %this, ...)
  ret void

else:
  %b_p = getelementptr %struct.Foo* %this, i32 0, i32 2
  %b_i8 = load i8** %b_p
  %b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
  ; Extra side effect so the two exits cannot be tail-merged
  ; (the RUN lines pass -enable-tail-merge=0 as well).
  store i32 42, i32* @g
  musttail call void (%struct.Foo*, ...)* %b(%struct.Foo* %this, ...)
  ret void
}
119 ; LINUX-LABEL: h_thunk:
121 ; LINUX: jmpq *{{.*}} # TAILCALL
122 ; LINUX: jmpq *{{.*}} # TAILCALL
123 ; WINDOWS-LABEL: h_thunk:
125 ; WINDOWS: jmpq *{{.*}} # TAILCALL
126 ; WINDOWS: jmpq *{{.*}} # TAILCALL