From 944061c4e152e9f66ffaaca5905253ba8012a4fa Mon Sep 17 00:00:00 2001
From: Nico Rieck
Date: Mon, 29 Jul 2013 13:07:06 +0000
Subject: [PATCH] Proper va_arg/va_copy lowering on Win64

Win64 uses CharPtrBuiltinVaList instead of the X86_64ABIBuiltinVaList
used by other 64-bit targets.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187355 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp |  4 +-
 test/CodeGen/X86/win64_vararg.ll   | 60 ++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ad2d30891ed..04287c3e175 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -582,10 +582,12 @@ void X86TargetLowering::resetOperationActions() {
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
   setOperationAction(ISD::VAEND             , MVT::Other, Expand);
-  if (Subtarget->is64Bit()) {
+  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
+    // TargetInfo::X86_64ABIBuiltinVaList
     setOperationAction(ISD::VAARG           , MVT::Other, Custom);
     setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
   } else {
+    // TargetInfo::CharPtrBuiltinVaList
     setOperationAction(ISD::VAARG           , MVT::Other, Expand);
     setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
   }
diff --git a/test/CodeGen/X86/win64_vararg.ll b/test/CodeGen/X86/win64_vararg.ll
index 74b7cead1ef..1a51b2a64a7 100644
--- a/test/CodeGen/X86/win64_vararg.ll
+++ b/test/CodeGen/X86/win64_vararg.ll
@@ -18,6 +18,7 @@ entry:
 }
 
 declare void @llvm.va_start(i8*) nounwind
+declare void @llvm.va_copy(i8*, i8*) nounwind
 
 ; CHECK-LABEL: f5:
 ; CHECK: pushq
@@ -51,3 +52,62 @@ entry:
   call void @llvm.va_start(i8* %ap1)
   ret i8* %ap1
 }
+
+; WinX86_64 uses char* for va_list. Verify that the correct number of
+; bytes is copied by va_copy.
+
+; CHECK-LABEL: copy1:
+; CHECK: subq $16
+; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
+; CHECK: movq [[REG_copy1]], 8(%rsp)
+; CHECK: movq [[REG_copy1]], (%rsp)
+; CHECK: addq $16
+; CHECK: ret
+define void @copy1(i64 %a0, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %cp = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  %cp1 = bitcast i8** %cp to i8*
+  call void @llvm.va_start(i8* %ap1)
+  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+  ret void
+}
+
+; CHECK-LABEL: copy4:
+; CHECK: subq $16
+; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
+; CHECK: movq [[REG_copy4]], 8(%rsp)
+; CHECK: movq [[REG_copy4]], (%rsp)
+; CHECK: addq $16
+; CHECK: ret
+define void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %cp = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  %cp1 = bitcast i8** %cp to i8*
+  call void @llvm.va_start(i8* %ap1)
+  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
+  ret void
+}
+
+; CHECK-LABEL: arg4:
+; CHECK: pushq
+; va_start:
+; CHECK: leaq 48(%rsp), [[REG_arg4_1:%[a-z]+]]
+; CHECK: movq [[REG_arg4_1]], (%rsp)
+; va_arg:
+; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
+; CHECK: movq [[REG_arg4_2]], (%rsp)
+; CHECK: movl 48(%rsp), %eax
+; CHECK: popq
+; CHECK: ret
+define i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
+entry:
+  %ap = alloca i8*, align 8
+  %ap1 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap1)
+  %tmp = va_arg i8** %ap, i32
+  ret i32 %tmp
+}
-- 
2.34.1
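
A note for readers, not part of the patch: the two va_list flavors named in
the commit message differ in how much state va_copy has to duplicate. On
Win64, va_list is a plain char* into the caller's argument area, so va_copy
is a single pointer-sized store (the one movq per destination that the
copy1/copy4 CHECK lines match). On the other 64-bit (SysV) targets, va_list
is a structure tracking register-save and overflow areas, which is why those
targets keep the Custom VAARG/VACOPY lowering in the first hunk. A minimal C
sketch of the source-level pattern these tests exercise (illustrative only;
the function names here are made up, not taken from the patch):

    #include <stdarg.h>
    #include <stdio.h>

    /* On Win64, va_list is a char*, so the va_copy below compiles to a
     * single pointer copy; on SysV x86-64 it must clone a va_list struct
     * describing register-save and overflow areas. */
    static int sum_ints(int count, ...) {
      va_list ap, cp;
      va_start(ap, count);
      va_copy(cp, ap);            /* cheap on Win64: one pointer store */
      int total = 0;
      for (int i = 0; i < count; ++i)
        total += va_arg(cp, int); /* walk the stacked argument slots */
      va_end(cp);
      va_end(ap);
      return total;
    }

    int main(void) {
      printf("%d\n", sum_ints(3, 1, 2, 3)); /* prints 6 */
      return 0;
    }

The stack offsets in the CHECK lines follow from the Win64 calling
convention: after @copy1's subq $16, the home slot of the first variadic
argument (the second argument overall) sits at 32(%rsp), while @copy4, with
four named i64s, finds its variadic area at 56(%rsp). In @arg4 (a pushq
instead of subq), va_start points at the fifth argument's home slot at
48(%rsp), and the Expand lowering of va_arg then advances the pointer by the
size of the type read, so the test expects 52(%rsp) after loading an i32.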