From: Michael Kuperstein
Date: Sun, 2 Mar 2014 15:26:36 +0000 (+0000)
Subject: Ensure bitcode encoding of instructions and their operands stays stable.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=3de6ee1ae69644ccdbb2f2f4550817de00719e90;p=oota-llvm.git

Ensure bitcode encoding of instructions and their operands stays stable.
This includes instructions that relate to memory access (load/store/GEP),
comparison instructions, and calls.

Work was done by lama.saba@intel.com.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202647 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/Bitcode/memInstructions.3.2.ll b/test/Bitcode/memInstructions.3.2.ll
new file mode 100644
index 00000000000..868e4b5f4d1
--- /dev/null
+++ b/test/Bitcode/memInstructions.3.2.ll
@@ -0,0 +1,308 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; memInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread memory-related instructions of
+; older bitcode files.
+
+define void @alloca(){
+entry:
+; CHECK: %res1 = alloca i8
+  %res1 = alloca i8
+
+; CHECK-NEXT: %res2 = alloca i8, i32 2
+  %res2 = alloca i8, i32 2
+
+; CHECK-NEXT: %res3 = alloca i8, i32 2, align 4
+  %res3 = alloca i8, i32 2, align 4
+
+; CHECK-NEXT: %res4 = alloca i8, align 4
+  %res4 = alloca i8, align 4
+
+  ret void
+}
+
+define void @load(){
+entry:
+  %ptr1 = alloca i8
+  store i8 2, i8* %ptr1
+
+; CHECK: %res1 = load i8* %ptr1
+  %res1 = load i8* %ptr1
+
+; CHECK-NEXT: %res2 = load volatile i8* %ptr1
+  %res2 = load volatile i8* %ptr1
+
+; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
+  %res3 = load i8* %ptr1, align 1
+
+; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
+  %res4 = load volatile i8* %ptr1, align 1
+
+; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
+  %res5 = load i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
+  %res6 = load volatile i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
+  %res7 = load i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+  %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
+  %res9 = load i8* %ptr1, !invariant.load !1
+
+; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
+  %res10 = load volatile i8* %ptr1, !invariant.load !1
+
+; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
+  %res11 = load i8* %ptr1, align 1, !invariant.load !1
+
+; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+  %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+
+; CHECK-NEXT: %res13 = load i8* %ptr1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
+  %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
+  %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
+  %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+
+; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{(!nontemporal !0, !invariant.load !1)|(!invariant.load !1, !nontemporal !0)}}
+  %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+
+  ret void
+}
+
+define void @loadAtomic(){
+entry:
+  %ptr1 = alloca i8
+  store i8 2, i8* %ptr1
+
+; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
+  %res1 = load atomic i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
+  %res2 = load atomic i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
+  %res3 = load atomic i8* %ptr1 acquire, align 1
+
+; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
+  %res4 = load atomic i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+  %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+  %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+  %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+
+; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+  %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+  %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+  %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+  %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+
+; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+  %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+
+; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+  %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+  %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+  %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+
+; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+  %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+
+  ret void
+}
+
+define void @store(){
+entry:
+  %ptr1 = alloca i8
+
+; CHECK: store i8 2, i8* %ptr1
+  store i8 2, i8* %ptr1
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1
+  store volatile i8 2, i8* %ptr1
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, align 1
+  store i8 2, i8* %ptr1, align 1
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1
+  store volatile i8 2, i8* %ptr1, align 1
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, !nontemporal !0
+  store i8 2, i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, !nontemporal !0
+  store volatile i8 2, i8* %ptr1, !nontemporal !0
+
+; CHECK-NEXT: store i8 2, i8* %ptr1, align 1, !nontemporal !0
+  store i8 2, i8* %ptr1, align 1, !nontemporal !0
+
+; CHECK-NEXT: store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+  store volatile i8 2, i8* %ptr1, align 1, !nontemporal !0
+
+  ret void
+}
+
+define void @storeAtomic(){
+entry:
+  %ptr1 = alloca i8
+
+; CHECK: store atomic i8 2, i8* %ptr1 unordered, align 1
+  store atomic i8 2, i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 monotonic, align 1
+  store atomic i8 2, i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 release, align 1
+  store atomic i8 2, i8* %ptr1 release, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 seq_cst, align 1
+  store atomic i8 2, i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+  store atomic volatile i8 2, i8* %ptr1 unordered, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+  store atomic volatile i8 2, i8* %ptr1 monotonic, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 release, align 1
+  store atomic volatile i8 2, i8* %ptr1 release, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+  store atomic volatile i8 2, i8* %ptr1 seq_cst, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+  store atomic i8 2, i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+  store atomic i8 2, i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread release, align 1
+  store atomic i8 2, i8* %ptr1 singlethread release, align 1
+
+; CHECK-NEXT: store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+  store atomic i8 2, i8* %ptr1 singlethread seq_cst, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+  store atomic volatile i8 2, i8* %ptr1 singlethread unordered, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+  store atomic volatile i8 2, i8* %ptr1 singlethread monotonic, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+  store atomic volatile i8 2, i8* %ptr1 singlethread release, align 1
+
+; CHECK-NEXT: store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+  store atomic volatile i8 2, i8* %ptr1 singlethread seq_cst, align 1
+
+  ret void
+}
+
+define void @cmpxchg(i32* %ptr,i32 %cmp,i32 %new){
+entry:
+  ;cmpxchg [volatile] <ty>* <pointer>, <ty> <cmp>, <ty> <new> [singlethread] <ordering>
+
+; CHECK: %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
+  %res1 = cmpxchg i32* %ptr, i32 %cmp, i32 %new monotonic
+
+; CHECK-NEXT: %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
+  %res2 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new monotonic
+
+; CHECK-NEXT: %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+  %res3 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+
+; CHECK-NEXT: %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+  %res4 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread monotonic
+
+
+; CHECK-NEXT: %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
+  %res5 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acquire
+
+; CHECK-NEXT: %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
+  %res6 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acquire
+
+; CHECK-NEXT: %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+  %res7 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+
+; CHECK-NEXT: %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+  %res8 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acquire
+
+
+; CHECK-NEXT: %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
+  %res9 = cmpxchg i32* %ptr, i32 %cmp, i32 %new release
+
+; CHECK-NEXT: %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
+  %res10 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new release
+
+; CHECK-NEXT: %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
+  %res11 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread release
+
+; CHECK-NEXT: %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
+  %res12 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread release
+
+
+; CHECK-NEXT: %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
+  %res13 = cmpxchg i32* %ptr, i32 %cmp, i32 %new acq_rel
+
+; CHECK-NEXT: %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
+  %res14 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new acq_rel
+
+; CHECK-NEXT: %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+  %res15 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+
+; CHECK-NEXT: %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+  %res16 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread acq_rel
+
+
+; CHECK-NEXT: %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
+  %res17 = cmpxchg i32* %ptr, i32 %cmp, i32 %new seq_cst
+
+; CHECK-NEXT: %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
+  %res18 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new seq_cst
+
+; CHECK-NEXT: %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+  %res19 = cmpxchg i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+
+; CHECK-NEXT: %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+  %res20 = cmpxchg volatile i32* %ptr, i32 %cmp, i32 %new singlethread seq_cst
+
+  ret void
+}
+
+define void @getelementptr({i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
+entry:
+; CHECK: %res1 = getelementptr { i8, i8 }* %s, i32 1, i32 1
+  %res1 = getelementptr {i8, i8}* %s, i32 1, i32 1
+
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }* %s, i32 1, i32 1
+  %res2 = getelementptr inbounds {i8, i8}* %s, i32 1, i32 1
+
+; CHECK-NEXT: %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+  %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+
+  ret void
+}
+
+!0 = metadata !{ i32 1 }
+!1 = metadata !{}
\ No newline at end of file
diff --git a/test/Bitcode/memInstructions.3.2.ll.bc b/test/Bitcode/memInstructions.3.2.ll.bc
new file mode 100644
index 00000000000..d75954a301b
Binary files /dev/null and b/test/Bitcode/memInstructions.3.2.ll.bc differ
diff --git a/test/Bitcode/miscInstructions.3.2.ll b/test/Bitcode/miscInstructions.3.2.ll
new file mode 100644
index 00000000000..bceae20109c
--- /dev/null
+++ b/test/Bitcode/miscInstructions.3.2.ll
@@ -0,0 +1,126 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; miscInstructions.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread miscellaneous instructions of
+; older bitcode files.
+ +define void @icmp(i32 %x1, i32 %x2, i32* %ptr1, i32* %ptr2, <2 x i32> %vec1, <2 x i32> %vec2){ +entry: +; CHECK: %res1 = icmp eq i32 %x1, %x2 + %res1 = icmp eq i32 %x1, %x2 + +; CHECK-NEXT: %res2 = icmp ne i32 %x1, %x2 + %res2 = icmp ne i32 %x1, %x2 + +; CHECK-NEXT: %res3 = icmp ugt i32 %x1, %x2 + %res3 = icmp ugt i32 %x1, %x2 + +; CHECK-NEXT: %res4 = icmp uge i32 %x1, %x2 + %res4 = icmp uge i32 %x1, %x2 + +; CHECK-NEXT: %res5 = icmp ult i32 %x1, %x2 + %res5 = icmp ult i32 %x1, %x2 + +; CHECK-NEXT: %res6 = icmp ule i32 %x1, %x2 + %res6 = icmp ule i32 %x1, %x2 + +; CHECK-NEXT: %res7 = icmp sgt i32 %x1, %x2 + %res7 = icmp sgt i32 %x1, %x2 + +; CHECK-NEXT: %res8 = icmp sge i32 %x1, %x2 + %res8 = icmp sge i32 %x1, %x2 + +; CHECK-NEXT: %res9 = icmp slt i32 %x1, %x2 + %res9 = icmp slt i32 %x1, %x2 + +; CHECK-NEXT: %res10 = icmp sle i32 %x1, %x2 + %res10 = icmp sle i32 %x1, %x2 + +; CHECK-NEXT: %res11 = icmp eq i32* %ptr1, %ptr2 + %res11 = icmp eq i32* %ptr1, %ptr2 + +; CHECK-NEXT: %res12 = icmp eq <2 x i32> %vec1, %vec2 + %res12 = icmp eq <2 x i32> %vec1, %vec2 + + ret void +} + + +define void @fcmp(float %x1, float %x2, <2 x float> %vec1, <2 x float> %vec2){ +entry: +; CHECK: %res1 = fcmp oeq float %x1, %x2 + %res1 = fcmp oeq float %x1, %x2 + +; CHECK-NEXT: %res2 = fcmp one float %x1, %x2 + %res2 = fcmp one float %x1, %x2 + +; CHECK-NEXT: %res3 = fcmp ugt float %x1, %x2 + %res3 = fcmp ugt float %x1, %x2 + +; CHECK-NEXT: %res4 = fcmp uge float %x1, %x2 + %res4 = fcmp uge float %x1, %x2 + +; CHECK-NEXT: %res5 = fcmp ult float %x1, %x2 + %res5 = fcmp ult float %x1, %x2 + +; CHECK-NEXT: %res6 = fcmp ule float %x1, %x2 + %res6 = fcmp ule float %x1, %x2 + +; CHECK-NEXT: %res7 = fcmp ogt float %x1, %x2 + %res7 = fcmp ogt float %x1, %x2 + +; CHECK-NEXT: %res8 = fcmp oge float %x1, %x2 + %res8 = fcmp oge float %x1, %x2 + +; CHECK-NEXT: %res9 = fcmp olt float %x1, %x2 + %res9 = fcmp olt float %x1, %x2 + +; CHECK-NEXT: %res10 = fcmp ole float %x1, %x2 + %res10 = fcmp ole float %x1, %x2 + +; CHECK-NEXT: %res11 = fcmp ord float %x1, %x2 + %res11 = fcmp ord float %x1, %x2 + +; CHECK-NEXT: %res12 = fcmp ueq float %x1, %x2 + %res12 = fcmp ueq float %x1, %x2 + +; CHECK-NEXT: %res13 = fcmp une float %x1, %x2 + %res13 = fcmp une float %x1, %x2 + +; CHECK-NEXT: %res14 = fcmp uno float %x1, %x2 + %res14 = fcmp uno float %x1, %x2 + +; CHECK-NEXT: %res15 = fcmp true float %x1, %x2 + %res15 = fcmp true float %x1, %x2 + +; CHECK-NEXT: %res16 = fcmp false float %x1, %x2 + %res16 = fcmp false float %x1, %x2 + +; CHECK-NEXT: %res17 = fcmp oeq <2 x float> %vec1, %vec2 + %res17 = fcmp oeq <2 x float> %vec1, %vec2 + + ret void +} + +declare i32 @printf(i8* noalias nocapture, ...) 
+
+define void @call(i32 %x, i8* %msg ){
+entry:
+
+; CHECK: %res1 = call i32 @test(i32 %x)
+  %res1 = call i32 @test(i32 %x)
+
+; CHECK-NEXT: %res2 = tail call i32 @test(i32 %x)
+  %res2 = tail call i32 @test(i32 %x)
+
+; CHECK-NEXT: %res3 = call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42)
+  %res3 = call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42)
+
+  ret void
+}
+
+define i32 @test(i32 %x){
+entry:
+
+  ret i32 %x
+}
diff --git a/test/Bitcode/miscInstructions.3.2.ll.bc b/test/Bitcode/miscInstructions.3.2.ll.bc
new file mode 100644
index 00000000000..9d479b50617
Binary files /dev/null and b/test/Bitcode/miscInstructions.3.2.ll.bc differ
diff --git a/test/Bitcode/variableArgumentIntrinsic.3.2.ll b/test/Bitcode/variableArgumentIntrinsic.3.2.ll
new file mode 100644
index 00000000000..35fe0e25282
--- /dev/null
+++ b/test/Bitcode/variableArgumentIntrinsic.3.2.ll
@@ -0,0 +1,33 @@
+; RUN: llvm-dis < %s.bc | FileCheck %s
+
+; variableArgumentIntrinsic.3.2.ll.bc was generated by passing this file to llvm-as-3.2.
+; The test checks that LLVM does not misread variable argument intrinsic instructions
+; of older bitcode files.
+
+define i32 @varArgIntrinsic(i32 %X, ...) {
+
+  %ap = alloca i8*
+  %ap2 = bitcast i8** %ap to i8*
+
+; CHECK: call void @llvm.va_start(i8* %ap2)
+  call void @llvm.va_start(i8* %ap2)
+
+; CHECK-NEXT: %tmp = va_arg i8** %ap, i32
+  %tmp = va_arg i8** %ap, i32
+
+  %aq = alloca i8*
+  %aq2 = bitcast i8** %aq to i8*
+
+; CHECK: call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+  call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+; CHECK-NEXT: call void @llvm.va_end(i8* %aq2)
+  call void @llvm.va_end(i8* %aq2)
+
+; CHECK-NEXT: call void @llvm.va_end(i8* %ap2)
+  call void @llvm.va_end(i8* %ap2)
+  ret i32 %tmp
+}
+
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
\ No newline at end of file
diff --git a/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc b/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc
new file mode 100644
index 00000000000..066e102b1da
Binary files /dev/null and b/test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc differ
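
Note on regenerating the checked-in inputs: the .ll.bc companions added above are fixed
artifacts, not build products. As the comment in each test states, every one was produced
by feeding its .ll source to the llvm-as from the 3.2 release. A minimal sketch of the
commands, assuming that assembler is on PATH under the name llvm-as-3.2 (the name the
test comments use; adjust it to match your LLVM 3.2 install):

    llvm-as-3.2 test/Bitcode/memInstructions.3.2.ll -o test/Bitcode/memInstructions.3.2.ll.bc
    llvm-as-3.2 test/Bitcode/miscInstructions.3.2.ll -o test/Bitcode/miscInstructions.3.2.ll.bc
    llvm-as-3.2 test/Bitcode/variableArgumentIntrinsic.3.2.ll -o test/Bitcode/variableArgumentIntrinsic.3.2.ll.bc

Each RUN line then disassembles the old bitcode with the current llvm-dis and verifies
the output against the CHECK lines, for example:

    llvm-dis < test/Bitcode/memInstructions.3.2.ll.bc | FileCheck test/Bitcode/memInstructions.3.2.ll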