X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=test%2FCodeGen%2FARM%2Freg_sequence.ll;h=507ee48cafc23f738b78552500625cf3569ac5a7;hb=9daba6f571437eb90a0fdf477f33cf0cfef9c58b;hp=9e365c9421125c87386d06b13225aac83161219f;hpb=a0148c360e9bb4badabf1a2397cfd70907618f87;p=oota-llvm.git

diff --git a/test/CodeGen/ARM/reg_sequence.ll b/test/CodeGen/ARM/reg_sequence.ll
index 9e365c94211..507ee48cafc 100644
--- a/test/CodeGen/ARM/reg_sequence.ll
+++ b/test/CodeGen/ARM/reg_sequence.ll
@@ -1,4 +1,5 @@
-; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 | FileCheck %s
+; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -arm-atomic-cfg-tidy=0 -regalloc=basic | FileCheck %s
 ; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
 
 %struct.int16x8_t = type { <8 x i16> }
@@ -10,7 +11,7 @@
 
 define void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
 entry:
-; CHECK: t1:
+; CHECK-LABEL: t1:
 ; CHECK: vld1.16
 ; CHECK-NOT: vmov d
 ; CHECK: vmovl.s16
@@ -18,89 +19,92 @@ entry:
 ; CHECK: vshrn.i32
 ; CHECK-NOT: vmov d
 ; CHECK-NEXT: vst1.16
-  %0 = getelementptr inbounds %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
-  %1 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
-  %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
-  %3 = load <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
+  %0 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
+  %1 = load <4 x i32>, <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
+  %2 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
+  %3 = load <4 x i32>, <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
-  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
+  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
   %6 = bitcast <8 x i16> %5 to <2 x double> ; <<2 x double>> [#uses=2]
   %7 = extractelement <2 x double> %6, i32 0 ; <double> [#uses=1]
   %8 = bitcast double %7 to <4 x i16> ; <<4 x i16>> [#uses=1]
-  %9 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %8) ; <<4 x i32>> [#uses=1]
+  %9 = sext <4 x i16> %8 to <4 x i32> ; <<4 x i32>> [#uses=1]
   %10 = extractelement <2 x double> %6, i32 1 ; <double> [#uses=1]
   %11 = bitcast double %10 to <4 x i16> ; <<4 x i16>> [#uses=1]
-  %12 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %11) ; <<4 x i32>> [#uses=1]
+  %12 = sext <4 x i16> %11 to <4 x i32> ; <<4 x i32>> [#uses=1]
   %13 = mul <4 x i32> %1, %9 ; <<4 x i32>> [#uses=1]
   %14 = mul <4 x i32> %3, %12 ; <<4 x i32>> [#uses=1]
-  %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
-  %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
-  %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
+  %15 = lshr <4 x i32> %13, <i32 12, i32 12, i32 12, i32 12>
+  %trunc_15 = trunc <4 x i32> %15 to <4 x i16>
+  %16 = lshr <4 x i32> %14, <i32 12, i32 12, i32 12, i32 12>
+  %trunc_16 = trunc <4 x i32> %16 to <4 x i16>
+  %17 = shufflevector <4 x i16> %trunc_15, <4 x i16> %trunc_16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
   %18 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17)
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17, i32 1)
   ret void
 }
 
 define void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
 entry:
-; CHECK: t2:
+; CHECK-LABEL: t2:
 ; CHECK: vld1.16
-; CHECK: vmul.i16
 ; CHECK-NOT: vmov
+; CHECK: vmul.i16
 ; CHECK: vld1.16
 ; CHECK: vmul.i16
 ; CHECK-NOT: vmov
 ; CHECK: vst1.16
 ; CHECK: vst1.16
-  %0 = getelementptr inbounds %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
-  %1 = load <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
-  %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
-  %3 = load <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
+  %0 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
+  %1 = load <8 x i16>, <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
+  %2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
+  %3 = load <8 x i16>, <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
   %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
-  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
-  %6 = getelementptr inbounds i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
+  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
+  %6 = getelementptr inbounds i16, i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
   %7 = bitcast i16* %6 to i8* ; <i8*> [#uses=1]
-  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7) ; <<8 x i16>> [#uses=1]
+  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7, i32 1) ; <<8 x i16>> [#uses=1]
   %9 = mul <8 x i16> %1, %5 ; <<8 x i16>> [#uses=1]
   %10 = mul <8 x i16> %3, %8 ; <<8 x i16>> [#uses=1]
   %11 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9)
-  %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9, i32 1)
+  %12 = getelementptr inbounds i16, i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
   %13 = bitcast i16* %12 to i8* ; <i8*> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10)
+  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10, i32 1)
   ret void
 }
 
 define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
-; CHECK: t3:
+; CHECK-LABEL: t3:
 ; CHECK: vld3.8
 ; CHECK: vmul.i8
-; CHECK-NOT: vmov
+; CHECK: vmov r
+; CHECK-NOT: vmov d
 ; CHECK: vst3.8
-  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A) ; <%struct.__neon_int8x8x3_t> [#uses=2]
+  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
   %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
   %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2 ; <<8 x i8>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
   %tmp5 = sub <8 x i8> %tmp3, %tmp4
   %tmp6 = add <8 x i8> %tmp2, %tmp3 ; <<8 x i8>> [#uses=1]
   %tmp7 = mul <8 x i8> %tmp4, %tmp2
-  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7, i32 1)
   ret <8 x i8> %tmp4
 }
 
 define void @t4(i32* %in, i32* %out) nounwind {
 entry:
-; CHECK: t4:
+; CHECK-LABEL: t4:
 ; CHECK: vld2.32
 ; CHECK-NOT: vmov
 ; CHECK: vld2.32
 ; CHECK-NOT: vmov
 ; CHECK: bne
   %tmp1 = bitcast i32* %in to i8* ; <i8*> [#uses=1]
-  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
-  %tmp3 = getelementptr inbounds i32* %in, i32 8 ; <i32*> [#uses=1]
+  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %tmp3 = getelementptr inbounds i32, i32* %in, i32 8 ; <i32*> [#uses=1]
   %tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
-  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
   %tmp8 = bitcast i32* %out to i8* ; <i8*> [#uses=1]
   br i1 undef, label %return1, label %return2
@@ -116,34 +120,34 @@ return1:
   %tmp39 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
   %tmp6 = add <4 x i32> %tmp52, %tmp ; <<4 x i32>> [#uses=1]
   %tmp7 = add <4 x i32> %tmp57, %tmp39 ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7)
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7, i32 1)
   ret void
 
 return2:
 ; CHECK: %return2
 ; CHECK: vadd.i32
-; CHECK: vmov q1, q3
 ; CHECK-NOT: vmov
-; CHECK: vst2.32 {d0, d1, d2, d3}
+; CHECK: vst2.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}
   %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
   %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
   %tmp102 = add <4 x i32> %tmp100, %tmp101 ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101)
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101, i32 1)
   call void @llvm.trap()
   unreachable
 }
 
 define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
-; CHECK: t5:
-; CHECK: vldmia
-; CHECK: vmov q1, q0
+; CHECK-LABEL: t5:
+; CHECK: vld1.32
+; How can FileCheck match Q and D registers? We need a lisp interpreter.
+; CHECK: vorr {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
 ; CHECK-NOT: vmov
-; CHECK: vld2.16 {d0[1], d2[1]}, [r0]
+; CHECK: vld2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
 ; CHECK-NOT: vmov
 ; CHECK: vadd.i16
   %tmp0 = bitcast i16* %A to i8* ; <i8*> [#uses=1]
-  %tmp1 = load <8 x i16>* %B ; <<8 x i16>> [#uses=2]
-  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
+  %tmp1 = load <8 x i16>, <8 x i16>* %B ; <<8 x i16>> [#uses=2]
+  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
   %tmp5 = add <8 x i16> %tmp3, %tmp4 ; <<8 x i16>> [#uses=1]
@@ -151,12 +155,12 @@ define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
 }
 
 define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
-; CHECK: t6:
-; CHECK: vldr.64
-; CHECK: vmov d1, d0
-; CHECK-NEXT: vld2.8 {d0[1], d1[1]}
-  %tmp1 = load <8 x i8>* %B ; <<8 x i8>> [#uses=2]
-  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
+; CHECK-LABEL: t6:
+; CHECK: vldr
+; CHECK: vorr d[[D0:[0-9]+]], d[[D1:[0-9]+]]
+; CHECK-NEXT: vld2.8 {d[[D1]][1], d[[D0]][1]}
+  %tmp1 = load <8 x i8>, <8 x i8>* %B ; <<8 x i8>> [#uses=2]
+  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
   %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
   %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]
   %tmp5 = add <8 x i8> %tmp3, %tmp4 ; <<8 x i8>> [#uses=1]
@@ -165,30 +169,30 @@ define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
 
 define void @t7(i32* %iptr, i32* %optr) nounwind {
 entry:
-; CHECK: t7:
+; CHECK-LABEL: t7:
 ; CHECK: vld2.32
 ; CHECK: vst2.32
-; CHECK: vld1.32 {d0, d1},
-; CHECK: vmov q1, q0
+; CHECK: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}},
+; CHECK: vorr q[[Q0:[0-9]+]], q[[Q1:[0-9]+]], q[[Q1:[0-9]+]]
 ; CHECK-NOT: vmov
-; CHECK: vuzp.32 q0, q1
+; CHECK: vuzp.32 q[[Q1]], q[[Q0]]
 ; CHECK: vst1.32
   %0 = bitcast i32* %iptr to i8* ; <i8*> [#uses=2]
-  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0) ; <%struct.__neon_int32x4x2_t> [#uses=2]
+  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
   %tmp57 = extractvalue %struct.__neon_int32x4x2_t %1, 0 ; <<4 x i32>> [#uses=1]
   %tmp60 = extractvalue %struct.__neon_int32x4x2_t %1, 1 ; <<4 x i32>> [#uses=1]
   %2 = bitcast i32* %optr to i8* ; <i8*> [#uses=2]
-  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60)
-  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0) ; <<4 x i32>> [#uses=1]
+  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60, i32 1)
+  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0, i32 1) ; <<4 x i32>> [#uses=1]
   %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2> ; <<4 x i32>> [#uses=1]
-  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4)
+  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4, i32 1)
   ret void
 }
 
 ; PR7156
 define arm_aapcs_vfpcc i32 @t8() nounwind {
-; CHECK: t8:
-; CHECK: vrsqrte.f32 q0, q0
+; CHECK-LABEL: t8:
+; CHECK: vrsqrte.f32 q8, q8
 bb.nph55.bb.nph55.split_crit_edge:
   br label %bb3
@@ -236,12 +240,11 @@ bb14: ; preds = %bb6
 
 ; PR7157
 define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
-; CHECK: t9:
-; CHECK: vldr.64
-; CHECK-NOT: vmov d{{.*}}, d0
-; CHECK: vmov.i8 d1
-; CHECK-NEXT: vstmia r0, {d0, d1}
-; CHECK-NEXT: vstmia r0, {d0, d1}
+; CHECK-LABEL: t9:
+; CHECK: vmov.i32 d16, #0x0
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
+; CHECK-NEXT: vorr d17, d16, d16
+; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
   %3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
   %4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
   store <4 x float> %4, <4 x float>* undef, align 16
@@ -268,10 +271,10 @@ define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
 ; PR7162
 define arm_aapcs_vfpcc i32 @t10() nounwind {
 entry:
-; CHECK: t10:
-; CHECK: vmov.i32 q1, #0x3F000000
-; CHECK: vmov d0, d1
-; CHECK: vmla.f32 q0, q0, d0[0]
+; CHECK-LABEL: t10:
+; CHECK: vmov.i32 q[[Q0:[0-9]+]], #0x3f000000
+; CHECK: vmul.f32 q8, q8, d[[DREG:[0-1]+]]
+; CHECK: vadd.f32 q8, q8, q8
   %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
   %1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
   %2 = insertelement <4 x float> %1, float undef, i32 2 ; <<4 x float>> [#uses=1]
@@ -304,44 +307,43 @@ bb14: ; preds = %bb6
 
 ; This test crashes the coalescer because live variables were not updated properly.
 define <8 x i8> @t11(i8* %A1, i8* %A2, i8* %A3, i8* %A4, i8* %A5, i8* %A6, i8* %A7, i8* %A8, i8* %B) nounwind {
-  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2d = extractvalue %struct.__neon_int8x8x3_t %tmp1d, 0 ; <<8 x i8>> [#uses=1]
-  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6) ; <%struct.__neon_int8x8x3_t> [#uses=1]
+  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
   %tmp2f = extractvalue %struct.__neon_int8x8x3_t %tmp1f, 0 ; <<8 x i8>> [#uses=1]
   %tmp2bd = add <8 x i8> zeroinitializer, %tmp2d ; <<8 x i8>> [#uses=1]
   %tmp2abcd = mul <8 x i8> zeroinitializer, %tmp2bd ; <<8 x i8>> [#uses=1]
   %tmp2ef = sub <8 x i8> zeroinitializer, %tmp2f ; <<8 x i8>> [#uses=1]
   %tmp2efgh = mul <8 x i8> %tmp2ef, undef ; <<8 x i8>> [#uses=2]
-  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp2efgh)
+  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp2efgh, i32 1)
   %tmp2 = sub <8 x i8> %tmp2efgh, %tmp2abcd ; <<8 x i8>> [#uses=1]
   %tmp7 = mul <8 x i8> undef, %tmp2 ; <<8 x i8>> [#uses=1]
-  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp7)
+  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp7, i32 1)
   ret <8 x i8> undef
 }
 
-declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*) nounwind readonly
-
-declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly
+declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly
 
-declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
+declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
 
-declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
-
-declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>) nounwind
+declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind
 
-declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind
+declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
 
-declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
+declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind
 
-declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly
 
-declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly
+declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*, i32) nounwind readonly
 
-declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly
 
-declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly
+declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly
 
-declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind
+declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind
 
 declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone
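
Aside from the CHECK-line updates, the IR changes in this diff follow one mechanical pattern: the retired NEON intrinsics are rewritten in plain IR (llvm.arm.neon.vmovls.* becomes sext; llvm.arm.neon.vshiftn.* becomes lshr plus trunc), and every surviving vld*/vst* intrinsic gains a trailing i32 alignment argument (i32 1 meaning no alignment guarantee). A minimal sketch of the same rewrite follows; the function name @widen_lo is hypothetical and not taken from the test.

; Old form (pre-upgrade): no alignment argument, widening via intrinsic.
;   %v = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p)
;   %w = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %lo)
; New form: trailing i32 alignment argument plus plain IR sign-extension.
define <4 x i32> @widen_lo(i8* %p) nounwind {
  %v = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 1)
  %c = bitcast <8 x i16> %v to <2 x double>          ; reinterpret as two d-register halves
  %d = extractelement <2 x double> %c, i32 0         ; take the low half
  %lo = bitcast double %d to <4 x i16>
  %w = sext <4 x i16> %lo to <4 x i32>               ; replaces @llvm.arm.neon.vmovls.v4i32
  ret <4 x i32> %w
}

declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly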