From 04d6c289ab28114af5471c4dc38cbf7b7127d3c3 Mon Sep 17 00:00:00 2001
From: Bob Wilson <bob.wilson@apple.com>
Date: Sun, 29 Aug 2010 05:57:34 +0000
Subject: [PATCH] Remove NEON vaddl, vaddw, vsubl, and vsubw intrinsics.
 Instead, use llvm IR add/sub operations with one or both operands sign- or
 zero-extended.  Auto-upgrade the old intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112416 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/IntrinsicsARM.td      |  12 ----
 lib/Target/ARM/ARMInstrNEON.td     |  91 +++++++++++++++++++----------
 lib/VMCore/AutoUpgrade.cpp         |  34 ++++++++++-
 test/Bitcode/neon-intrinsics.ll    |  48 +++++++++++++++
 test/Bitcode/neon-intrinsics.ll.bc | Bin 2884 -> 3796 bytes
 test/CodeGen/ARM/vadd.ll           |  82 +++++++++++++-------------
 test/CodeGen/ARM/vsub.ll           |  82 +++++++++++++-------------
 7 files changed, 225 insertions(+), 124 deletions(-)

diff --git a/include/llvm/IntrinsicsARM.td b/include/llvm/IntrinsicsARM.td
index 7be283b29c8..2528eaa9738 100644
--- a/include/llvm/IntrinsicsARM.td
+++ b/include/llvm/IntrinsicsARM.td
@@ -73,10 +73,6 @@ let TargetPrefix = "arm" in {  // All intrinsics start with "llvm.arm.".
                 [LLVMTruncatedElementVectorType<0>,
                  LLVMTruncatedElementVectorType<0>],
                 [IntrNoMem]>;
-  class Neon_2Arg_Wide_Intrinsic
-    : Intrinsic<[llvm_anyvector_ty],
-                [LLVMMatchType<0>, LLVMTruncatedElementVectorType<0>],
-                [IntrNoMem]>;
   class Neon_3Arg_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -128,10 +124,6 @@ let Properties = [IntrNoMem, Commutative] in {
   def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic;
   def int_arm_neon_vaddhn : Neon_2Arg_Narrow_Intrinsic;
   def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;
-  def int_arm_neon_vaddls : Neon_2Arg_Long_Intrinsic;
-  def int_arm_neon_vaddlu : Neon_2Arg_Long_Intrinsic;
-  def int_arm_neon_vaddws : Neon_2Arg_Wide_Intrinsic;
-  def int_arm_neon_vaddwu : Neon_2Arg_Wide_Intrinsic;
 
   // Vector Multiply.
   def int_arm_neon_vmulp : Neon_2Arg_Intrinsic;
@@ -172,10 +164,6 @@ def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic;
 def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
 def int_arm_neon_vsubhn : Neon_2Arg_Narrow_Intrinsic;
 def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
-def int_arm_neon_vsubls : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vsublu : Neon_2Arg_Long_Intrinsic;
-def int_arm_neon_vsubws : Neon_2Arg_Wide_Intrinsic;
-def int_arm_neon_vsubwu : Neon_2Arg_Wide_Intrinsic;
 
 // Vector Absolute Compare.
 let TargetPrefix = "arm" in {
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 88d606c0f54..741df7e9d4a 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -1294,6 +1294,19 @@ class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
   let isCommutable = Commutable;
 }
 
+// Long 3-register operations.
+class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
+           InstrItinClass itin, string OpcodeStr, string Dt,
+           ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
+           bit Commutable>
+  : N3V<op24, op23, op21_20, op11_8, 0, op4,
+        (outs QPR:$dst), (ins DPR:$src1, DPR:$src2), N3RegFrm, itin,
+        OpcodeStr, Dt, "$dst, $src1, $src2", "",
+        [(set QPR:$dst, (TyQ (OpNode (TyQ (ExtOp (TyD DPR:$src1))),
+                                     (TyQ (ExtOp (TyD DPR:$src2))))))]> {
+  let isCommutable = Commutable;
+}
+
 // Long 3-register intrinsics.
 class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               InstrItinClass itin, string OpcodeStr, string Dt,
@@ -1325,14 +1338,15 @@ class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
                     (OpTy (NEONvduplane (OpTy DPR_8:$src2),
                                         imm:$lane)))))]>;
 
-// Wide 3-register intrinsics.
-class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
-              string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
-              Intrinsic IntOp, bit Commutable>
+// Wide 3-register operations.
+class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
+           string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
+           SDNode OpNode, SDNode ExtOp, bit Commutable>
   : N3V<op24, op23, op21_20, op11_8, 0, op4,
         (outs QPR:$dst), (ins QPR:$src1, DPR:$src2), N3RegFrm, IIC_VSUBiD,
         OpcodeStr, Dt, "$dst, $src1, $src2", "",
-        [(set QPR:$dst, (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2))))]> {
+        [(set QPR:$dst, (OpNode (TyQ QPR:$src1),
+                                (TyQ (ExtOp (TyD DPR:$src2)))))]> {
   let isCommutable = Commutable;
 }
 
@@ -1684,6 +1698,23 @@ multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
 }
 
 
+// Neon Long 3-register vector operations.
+
+multiclass N3VL_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
+                    InstrItinClass itin16, InstrItinClass itin32,
+                    string OpcodeStr, string Dt,
+                    SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
+  def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
+                   OpcodeStr, !strconcat(Dt, "16"),
+                   v4i32, v4i16, OpNode, ExtOp, Commutable>;
+  def v2i64 : N3VL<op24, op23, 0b10, op11_8, op4, itin32,
+                   OpcodeStr, !strconcat(Dt, "32"),
+                   v2i64, v2i32, OpNode, ExtOp, Commutable>;
+  def v8i16 : N3VL<op24, op23, 0b00, op11_8, op4, itin16,
+                   OpcodeStr, !strconcat(Dt, "8"),
+                   v8i16, v8i8, OpNode, ExtOp, Commutable>;
+}
+
 // Neon Long 3-register vector intrinsics.
 
 // First with only element sizes of 16 and 32 bits:
@@ -1723,18 +1754,18 @@ multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
 
 // Neon Wide 3-register vector intrinsics,
 // source operand element sizes of 8, 16 and 32 bits:
-multiclass N3VWInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
-                       string OpcodeStr, string Dt,
-                       Intrinsic IntOp, bit Commutable = 0> {
-  def v8i16 : N3VWInt<op24, op23, 0b00, op11_8, op4,
-                      OpcodeStr, !strconcat(Dt, "8"),
-                      v8i16, v8i8, IntOp, Commutable>;
-  def v4i32 : N3VWInt<op24, op23, 0b01, op11_8, op4,
-                      OpcodeStr, !strconcat(Dt, "16"),
-                      v4i32, v4i16, IntOp, Commutable>;
-  def v2i64 : N3VWInt<op24, op23, 0b10, op11_8, op4,
-                      OpcodeStr, !strconcat(Dt, "32"),
-                      v2i64, v2i32, IntOp, Commutable>;
+multiclass N3VW_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
+                    string OpcodeStr, string Dt,
+                    SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
+  def v8i16 : N3VW<op24, op23, 0b00, op11_8, op4,
+                   OpcodeStr, !strconcat(Dt, "8"),
+                   v8i16, v8i8, OpNode, ExtOp, Commutable>;
+  def v4i32 : N3VW<op24, op23, 0b01, op11_8, op4,
+                   OpcodeStr, !strconcat(Dt, "16"),
+                   v4i32, v4i16, OpNode, ExtOp, Commutable>;
+  def v2i64 : N3VW<op24, op23, 0b10, op11_8, op4,
+                   OpcodeStr, !strconcat(Dt, "32"),
+                   v2i64, v2i32, OpNode, ExtOp, Commutable>;
 }
 
 
@@ -2073,13 +2104,13 @@ def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, IIC_VBIND, "vadd", "f32",
 def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, IIC_VBINQ, "vadd", "f32",
                   v4f32, v4f32, fadd, 1>;
 //   VADDL    : Vector Add Long (Q = D + D)
-defm VADDLs   : N3VLInt_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
-                            "vaddl", "s", int_arm_neon_vaddls, 1>;
-defm VADDLu   : N3VLInt_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
-                            "vaddl", "u", int_arm_neon_vaddlu, 1>;
+defm VADDLs   : N3VL_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
+                         "vaddl", "s", add, sext, 1>;
+defm VADDLu   : N3VL_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
+                         "vaddl", "u", add, zext, 1>;
 //   VADDW    : Vector Add Wide (Q = Q + D)
-defm VADDWs   : N3VWInt_QHS<0,1,0b0001,0, "vaddw", "s", int_arm_neon_vaddws, 0>;
-defm VADDWu   : N3VWInt_QHS<1,1,0b0001,0, "vaddw", "u", int_arm_neon_vaddwu, 0>;
+defm VADDWs   : N3VW_QHS<0,1,0b0001,0, "vaddw", "s", add, sext, 0>;
+defm VADDWu   : N3VW_QHS<1,1,0b0001,0, "vaddw", "u", add, zext, 0>;
 //   VHADD    : Vector Halving Add
 defm VHADDs   : N3VInt_QHS<0, 0, 0b0000, 0, N3RegFrm,
                            IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
@@ -2324,13 +2355,13 @@ def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, IIC_VBIND, "vsub", "f32",
 def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, IIC_VBINQ, "vsub", "f32",
                   v4f32, v4f32, fsub, 0>;
 //   VSUBL    : Vector Subtract Long (Q = D - D)
-defm VSUBLs   : N3VLInt_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
-                            "vsubl", "s", int_arm_neon_vsubls, 1>;
-defm VSUBLu   : N3VLInt_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
-                            "vsubl", "u", int_arm_neon_vsublu, 1>;
+defm VSUBLs   : N3VL_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
+                         "vsubl", "s", sub, sext, 0>;
+defm VSUBLu   : N3VL_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
+                         "vsubl", "u", sub, zext, 0>;
 //   VSUBW    : Vector Subtract Wide (Q = Q - D)
-defm VSUBWs   : N3VWInt_QHS<0,1,0b0011,0, "vsubw", "s", int_arm_neon_vsubws, 0>;
-defm VSUBWu   : N3VWInt_QHS<1,1,0b0011,0, "vsubw", "u", int_arm_neon_vsubwu, 0>;
+defm VSUBWs   : N3VW_QHS<0,1,0b0011,0, "vsubw", "s", sub, sext, 0>;
+defm VSUBWu   : N3VW_QHS<1,1,0b0011,0, "vsubw", "u", sub, zext, 0>;
 //   VHSUB    : Vector Halving Subtract
 defm VHSUBs   : N3VInt_QHS<0, 0, 0b0010, 0, N3RegFrm,
                            IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
@@ -2559,7 +2590,7 @@ def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBINQ,
 defm VABDLs   : 
N3VLInt_QHS<0,1,0b0111,0, IIC_VSUBi4Q, IIC_VSUBi4Q, "vabdl", "s", int_arm_neon_vabdls, 0>; defm VABDLu : N3VLInt_QHS<1,1,0b0111,0, IIC_VSUBi4Q, IIC_VSUBi4Q, - "vabdl", "u", int_arm_neon_vabdlu, 0>; + "vabdl", "u", int_arm_neon_vabdlu, 0>; // VABA : Vector Absolute Difference and Accumulate defm VABAs : N3VInt3_QHS<0,0,0b0111,1, IIC_VABAD, IIC_VABAQ, diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp index 052fd2d5b1a..62a46259ac2 100644 --- a/lib/VMCore/AutoUpgrade.cpp +++ b/lib/VMCore/AutoUpgrade.cpp @@ -79,8 +79,17 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { return true; } } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) { - if (Name.compare(14, 7, "vmovls.", 7) == 0 || - Name.compare(14, 7, "vmovlu.", 7) == 0) { + if (((Name.compare(14, 5, "vmovl", 5) == 0 || + Name.compare(14, 5, "vaddl", 5) == 0 || + Name.compare(14, 5, "vsubl", 5) == 0) && + (Name.compare(19, 2, "s.", 2) == 0 || + Name.compare(19, 2, "u.", 2) == 0)) || + + ((Name.compare(14, 5, "vaddw", 5) == 0 || + Name.compare(14, 5, "vsubw", 5) == 0) && + (Name.compare(19, 2, "s.", 2) == 0 || + Name.compare(19, 2, "u.", 2) == 0))) { + // Calls to these are transformed into IR without intrinsics. NewFn = 0; return true; @@ -371,6 +380,27 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) { NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(), "upgraded." + CI->getName(), CI); + + } else if (Name.compare(14, 4, "vadd", 4) == 0 || + Name.compare(14, 4, "vsub", 4) == 0) { + // Extend one (vaddw/vsubw) or both (vaddl/vsubl) operands. + Value *V0 = CI->getArgOperand(0); + Value *V1 = CI->getArgOperand(1); + if (Name.at(19) == 's') { + if (Name.at(18) == 'l') + V0 = new SExtInst(CI->getArgOperand(0), CI->getType(), "", CI); + V1 = new SExtInst(CI->getArgOperand(1), CI->getType(), "", CI); + } else { + assert(Name.at(19) == 'u' && "unexpected vadd/vsub intrinsic"); + if (Name.at(18) == 'l') + V0 = new ZExtInst(CI->getArgOperand(0), CI->getType(), "", CI); + V1 = new ZExtInst(CI->getArgOperand(1), CI->getType(), "", CI); + } + if (Name.compare(14, 4, "vadd", 4) == 0) + NewI = BinaryOperator::CreateAdd(V0, V1,"upgraded."+CI->getName(),CI); + else + NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI); + } else { llvm_unreachable("Unknown arm.neon function for CallInst upgrade."); } diff --git a/test/Bitcode/neon-intrinsics.ll b/test/Bitcode/neon-intrinsics.ll index fe76514ee01..d6d03d0970f 100644 --- a/test/Bitcode/neon-intrinsics.ll +++ b/test/Bitcode/neon-intrinsics.ll @@ -28,6 +28,54 @@ ; CHECK-NOT: arm.neon.vmovlu.v2i64 ; CHECK: zext <2 x i32> +; vaddl/vaddw should be auto-upgraded to add with sext/zext + +; CHECK: vaddls16 +; CHECK-NOT: arm.neon.vaddls.v4i32 +; CHECK: sext <4 x i16> +; CHECK-NEXT: sext <4 x i16> +; CHECK-NEXT: add <4 x i32> + +; CHECK: vaddlu32 +; CHECK-NOT: arm.neon.vaddlu.v2i64 +; CHECK: zext <2 x i32> +; CHECK-NEXT: zext <2 x i32> +; CHECK-NEXT: add <2 x i64> + +; CHECK: vaddws8 +; CHECK-NOT: arm.neon.vaddws.v8i16 +; CHECK: sext <8 x i8> +; CHECK-NEXT: add <8 x i16> + +; CHECK: vaddwu16 +; CHECK-NOT: arm.neon.vaddwu.v4i32 +; CHECK: zext <4 x i16> +; CHECK-NEXT: add <4 x i32> + +; vsubl/vsubw should be auto-upgraded to sub with sext/zext + +; CHECK: vsubls16 +; CHECK-NOT: arm.neon.vsubls.v4i32 +; CHECK: sext <4 x i16> +; CHECK-NEXT: sext <4 x i16> +; CHECK-NEXT: sub <4 x i32> + +; CHECK: vsublu32 +; CHECK-NOT: arm.neon.vsublu.v2i64 +; CHECK: zext <2 x i32> +; CHECK-NEXT: zext <2 x 
i32> +; CHECK-NEXT: sub <2 x i64> + +; CHECK: vsubws8 +; CHECK-NOT: arm.neon.vsubws.v8i16 +; CHECK: sext <8 x i8> +; CHECK-NEXT: sub <8 x i16> + +; CHECK: vsubwu16 +; CHECK-NOT: arm.neon.vsubwu.v4i32 +; CHECK: zext <4 x i16> +; CHECK-NEXT: sub <4 x i32> + ; vld* and vst* intrinsic calls need an alignment argument (defaulted to 1) ; CHECK: vld1i8 diff --git a/test/Bitcode/neon-intrinsics.ll.bc b/test/Bitcode/neon-intrinsics.ll.bc index c324aeef18cc3c811e06edf2768945afa9c01725..bc01eb540e6c2621b85d1ab3fa8a4104d15398c3 100644 GIT binary patch literal 3796 zcmb_e4Ny~87CuQH$pZpONaBRu01pC8>OeP1SwX8#45*2=E{SP%ra!z`*{aiMaHMtJ z788ipA5`VPgsOf!+amlD=tVPGqZ{P z>)h(4e3LKLbX{stl#wb+ilHLkoYIu53EAA@%-pT{qW*r_x{kIEW8WkXaV#pt!rxTy zzj5RQXwM`w5vYo#|H9v$5CpJN0ISf(kk_9~Ro8o=aJe}txo~+Oc~~>X&z&zPzR)#N z`9U(2Db`R~9`eaX zN?E!A!gQrJe6dU&rnQRjMSw6x7lr8#ZJ3f$VOpoy=T<4&Yqa$NE75R_bT5|?`|}8D znUoB>vWmj4T3y)nhC1xpRe1rE3sWwgSVj{3YFL=AfojsIUFu5Tr^`~bFVF^S-P%ws zB|Y-z1!&rPHl6P=wkbaG7@M?x+i)dgyn|VytotU6lvy#xSnb{Vya<N{gP?r!2!yr_80_wFGlvK!v z6oix$yeLn#A3!6qNj~y0RfFB~4K3wQU0B_kuGb$*!v!1;3Z~h+XJ`-kGsK^vJrv9k z|2x<-sk8dDErgep@cE6N?ED+PB(PQp-Ja~5J=t^_bIm8= zm$^^mulN&JiX?ueu)0q76+wXYy~*2+#h95d389!T8~T6w;u8jIzodxah-HT(DjvKk zYIqkm;a6yyOPo9 zJH^~=LLwg~$>d+(Ko@p6D7@Nn_J~{yz!;CuNta>r5i${vi*`a%N;?#!90yBE)QC6| z{LYliG8APfB1KUc@0ccx?vLP<1(k9ARq4e2I^Nft+dr~L|jKKSt4S9kvY=4(e6 zAC7L^e&y{|FVFk+z{E@EChpe0eeJ6)SDrpJGFjGrbLE#MtwZCt^;d8I^W4Cef0X|9 z#2?mg&#e3AT>;ck0Qf=z!Ww%(w>D!7dF8adQBu)(O}c7qP2=ifU)9K-gU<%?kyY6yXsd`$}xLtZDg6po}nzVos7I`Ef%~#r?0+vW$%e>^SLdN$UfOXq5-VZ-IQwuzs%bKDE8J$ZMO2S@$w zMa?BwQ!h&Uoj(j%^{YC~-41ngMN&)Xx%0N-AR>E4{h3_0j19|n?jJd+4OHG$9-3@v z;DD;RB)Z9=yUej4+;+DaMf-(z*zh?<-{g3LOMbw`kY^04DI_!JnTbA! zeV|g){gw`ML3>Q+)d$is-Dy5>cso)6VI~JU2;Zt`-_xg0@+xpcW8=%8edbk zHPUj-&eo!1*NBT#HOmm@8(lMRYow41o+NH4U=vAV>Vn|Hb@oBKrd!6A>6wYorq|_c zRkYXH+Q5m+P`b{<^-s6-SKTt7Y#r$2Ff18HZM)OdOat^rJX{bA&4QeGSY)hYFU?Iv z{?(f8HIX^>_QCb#~U363pqStJW-tUD*EtL~Bxy literal 2884 zcmb_ddrVVj6hF7Uy}iJ#l!C5Eowu|#xXD%!6^%2c6=+plK^EN>wMAXgKY-f-Y$R9X{uCQPDXDE+*)L+ng-!b-vqHYMh(IeMxh_e&6r> ze&_toxvkma`&P>VoRR=gN&poA$lbcF8SS)PKQZ!QotnG$nFl(}1( z8kf$hZIP*y_14I8t*$Lk;NrC_^z!!hn1Y7d2GfyV3VCd5oJyQWM)F_yub{Fe2M3ix`o8gss$dNwni*X|J)=w7QiwXVf@py1q+8 zsW}~=AY(1EpX^&px^=BaiTjKIt?cBKQofyfp)f1$qmwbF`Sn*dlzDvfNjP05#Od`0 zn{8@AUgd|s=#w&SQ)cW?m96yD=wN*!$7PmJ++>W1^Yck9-F6SPgH=t5k!(!lG?|c8 zYE)PGla^KaHwab!>hvmq!-i_DlC5$If*LJ@V@j(g3yy((@}|?VaT!TTl3Y7e@3XV@ z4r9{#TqR3ya>gOfH1h4W{#rX%$1B;mV;SO6U0jymVNWU{sL1QaNQBkm2P`TD2>5hZ zTPVPu0B4&3fNFwvBLgNpFzP!2SlpE8hhg6UeC0bO5^|%im}L@gEEQ2Kxp7$YD2m4$ z&Nhk%nvfio(E=VA^+!V_%P~Rm#4Qa99Zi6s^w7|WJcyw}7}DQBdbp8@%#t8R8)6j<7~_z(k7eZ&#ECNy@YX`SebM!7Tie9O1IpN+ zuY7}suL}}B#3$V6GyN}owNK6$@P^t&sXqpYR-A`c_kC|vOLmj-5vw7!;P?|C(VLM9 z-z)~+S@bEhSm-p#@SJ$5TGALAx9H%wiJp>uf1Dfy_t3rmp?j$kDFp+IWx*4O^$&HV zEB`AtUewJPMP40z#~@!#!Mn`bU1>0H81T$uAG4&EsgH0$>4?uW>_xpy`7&`h$omwi z=Lo5%16v&JdJbFMd!A!X8 z%um@LQ~FlaA*pN4tPR*e?(5hz#jBpJ&5k_W53_w^Aq!E z9xXUR-r^u4?g_XIu&m!FOzU5{-U|F(z-U&`v~_A=A$})+h>6nfYK^LC{Omcy&&|H} z$-&=OtvPw~&FV>&oeNiA-hOJ%*geO4<~8@+dt>{xAD3RvDZA90e(2`ZJ;s`I*YBiU zz4J%&@uhdi-P-;2{MGTrzwVO4avoqi@|M&lPd(gcYU@*UrWGGfzmWLbwkPVAi;TVvVMo8`|x?!W>Ytov%%uy$Way+}e1>%2v(Y(owX367Sy F{sx?;IP?Gj diff --git a/test/CodeGen/ARM/vadd.ll b/test/CodeGen/ARM/vadd.ll index 9bb8bf56104..a830e968ff7 100644 --- a/test/CodeGen/ARM/vadd.ll +++ b/test/CodeGen/ARM/vadd.ll @@ -157,8 +157,10 @@ define <8 x i16> @vaddls8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddl.s8 %tmp1 = load <8 x i8>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8> %tmp1, <8 x i8> 
%tmp2) - ret <8 x i16> %tmp3 + %tmp3 = sext <8 x i8> %tmp1 to <8 x i16> + %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> + %tmp5 = add <8 x i16> %tmp3, %tmp4 + ret <8 x i16> %tmp5 } define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind { @@ -166,8 +168,10 @@ define <4 x i32> @vaddls16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ;CHECK: vaddl.s16 %tmp1 = load <4 x i16>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = sext <4 x i16> %tmp1 to <4 x i32> + %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> + %tmp5 = add <4 x i32> %tmp3, %tmp4 + ret <4 x i32> %tmp5 } define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { @@ -175,8 +179,10 @@ define <2 x i64> @vaddls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ;CHECK: vaddl.s32 %tmp1 = load <2 x i32>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = sext <2 x i32> %tmp1 to <2 x i64> + %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> + %tmp5 = add <2 x i64> %tmp3, %tmp4 + ret <2 x i64> %tmp5 } define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { @@ -184,8 +190,10 @@ define <8 x i16> @vaddlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddl.u8 %tmp1 = load <8 x i8>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = zext <8 x i8> %tmp1 to <8 x i16> + %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> + %tmp5 = add <8 x i16> %tmp3, %tmp4 + ret <8 x i16> %tmp5 } define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind { @@ -193,8 +201,10 @@ define <4 x i32> @vaddlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ;CHECK: vaddl.u16 %tmp1 = load <4 x i16>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = zext <4 x i16> %tmp1 to <4 x i32> + %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> + %tmp5 = add <4 x i32> %tmp3, %tmp4 + ret <4 x i32> %tmp5 } define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { @@ -202,25 +212,20 @@ define <2 x i64> @vaddlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ;CHECK: vaddl.u32 %tmp1 = load <2 x i32>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = zext <2 x i32> %tmp1 to <2 x i64> + %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> + %tmp5 = add <2 x i64> %tmp3, %tmp4 + ret <2 x i64> %tmp5 } -declare <8 x i16> @llvm.arm.neon.vaddls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vaddls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone - -declare <8 x i16> @llvm.arm.neon.vaddlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vaddlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vaddlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone - define <8 x i16> @vaddws8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddws8: ;CHECK: vaddw.s8 %tmp1 = load <8 x i16>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = sext <8 x i8> %tmp2 to <8 x i16> + %tmp4 = add <8 x i16> %tmp1, %tmp3 + ret <8 x i16> %tmp4 } define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind { 
@@ -228,8 +233,9 @@ define <4 x i32> @vaddws16(<4 x i32>* %A, <4 x i16>* %B) nounwind { ;CHECK: vaddw.s16 %tmp1 = load <4 x i32>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = sext <4 x i16> %tmp2 to <4 x i32> + %tmp4 = add <4 x i32> %tmp1, %tmp3 + ret <4 x i32> %tmp4 } define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind { @@ -237,8 +243,9 @@ define <2 x i64> @vaddws32(<2 x i64>* %A, <2 x i32>* %B) nounwind { ;CHECK: vaddw.s32 %tmp1 = load <2 x i64>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = sext <2 x i32> %tmp2 to <2 x i64> + %tmp4 = add <2 x i64> %tmp1, %tmp3 + ret <2 x i64> %tmp4 } define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind { @@ -246,8 +253,9 @@ define <8 x i16> @vaddwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vaddw.u8 %tmp1 = load <8 x i16>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = zext <8 x i8> %tmp2 to <8 x i16> + %tmp4 = add <8 x i16> %tmp1, %tmp3 + ret <8 x i16> %tmp4 } define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind { @@ -255,8 +263,9 @@ define <4 x i32> @vaddwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind { ;CHECK: vaddw.u16 %tmp1 = load <4 x i32>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = zext <4 x i16> %tmp2 to <4 x i32> + %tmp4 = add <4 x i32> %tmp1, %tmp3 + ret <4 x i32> %tmp4 } define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind { @@ -264,14 +273,7 @@ define <2 x i64> @vaddwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind { ;CHECK: vaddw.u32 %tmp1 = load <2 x i64>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = zext <2 x i32> %tmp2 to <2 x i64> + %tmp4 = add <2 x i64> %tmp1, %tmp3 + ret <2 x i64> %tmp4 } - -declare <8 x i16> @llvm.arm.neon.vaddws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vaddws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vaddws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone - -declare <8 x i16> @llvm.arm.neon.vaddwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vaddwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vaddwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone diff --git a/test/CodeGen/ARM/vsub.ll b/test/CodeGen/ARM/vsub.ll index 3416de76f12..df77bb31fc8 100644 --- a/test/CodeGen/ARM/vsub.ll +++ b/test/CodeGen/ARM/vsub.ll @@ -157,8 +157,10 @@ define <8 x i16> @vsubls8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsubl.s8 %tmp1 = load <8 x i8>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = sext <8 x i8> %tmp1 to <8 x i16> + %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> + %tmp5 = sub <8 x i16> %tmp3, %tmp4 + ret <8 x i16> %tmp5 } define <4 x i32> @vsubls16(<4 x i16>* %A, <4 x i16>* %B) nounwind { @@ -166,8 +168,10 @@ define <4 x i32> @vsubls16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ;CHECK: vsubl.s16 %tmp1 = load <4 x i16>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) - ret 
<4 x i32> %tmp3 + %tmp3 = sext <4 x i16> %tmp1 to <4 x i32> + %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> + %tmp5 = sub <4 x i32> %tmp3, %tmp4 + ret <4 x i32> %tmp5 } define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { @@ -175,8 +179,10 @@ define <2 x i64> @vsubls32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ;CHECK: vsubl.s32 %tmp1 = load <2 x i32>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = sext <2 x i32> %tmp1 to <2 x i64> + %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> + %tmp5 = sub <2 x i64> %tmp3, %tmp4 + ret <2 x i64> %tmp5 } define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { @@ -184,8 +190,10 @@ define <8 x i16> @vsublu8(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsubl.u8 %tmp1 = load <8 x i8>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = zext <8 x i8> %tmp1 to <8 x i16> + %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> + %tmp5 = sub <8 x i16> %tmp3, %tmp4 + ret <8 x i16> %tmp5 } define <4 x i32> @vsublu16(<4 x i16>* %A, <4 x i16>* %B) nounwind { @@ -193,8 +201,10 @@ define <4 x i32> @vsublu16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ;CHECK: vsubl.u16 %tmp1 = load <4 x i16>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = zext <4 x i16> %tmp1 to <4 x i32> + %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> + %tmp5 = sub <4 x i32> %tmp3, %tmp4 + ret <4 x i32> %tmp5 } define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { @@ -202,25 +212,20 @@ define <2 x i64> @vsublu32(<2 x i32>* %A, <2 x i32>* %B) nounwind { ;CHECK: vsubl.u32 %tmp1 = load <2 x i32>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = zext <2 x i32> %tmp1 to <2 x i64> + %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> + %tmp5 = sub <2 x i64> %tmp3, %tmp4 + ret <2 x i64> %tmp5 } -declare <8 x i16> @llvm.arm.neon.vsubls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vsubls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vsubls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone - -declare <8 x i16> @llvm.arm.neon.vsublu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vsublu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vsublu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone - define <8 x i16> @vsubws8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsubws8: ;CHECK: vsubw.s8 %tmp1 = load <8 x i16>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = sext <8 x i8> %tmp2 to <8 x i16> + %tmp4 = sub <8 x i16> %tmp1, %tmp3 + ret <8 x i16> %tmp4 } define <4 x i32> @vsubws16(<4 x i32>* %A, <4 x i16>* %B) nounwind { @@ -228,8 +233,9 @@ define <4 x i32> @vsubws16(<4 x i32>* %A, <4 x i16>* %B) nounwind { ;CHECK: vsubw.s16 %tmp1 = load <4 x i32>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = sext <4 x i16> %tmp2 to <4 x i32> + %tmp4 = sub <4 x i32> %tmp1, %tmp3 + ret <4 x i32> %tmp4 } define <2 x i64> @vsubws32(<2 x i64>* %A, <2 x i32>* %B) nounwind { @@ -237,8 +243,9 @@ define <2 x i64> @vsubws32(<2 x 
i64>* %A, <2 x i32>* %B) nounwind { ;CHECK: vsubw.s32 %tmp1 = load <2 x i64>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = sext <2 x i32> %tmp2 to <2 x i64> + %tmp4 = sub <2 x i64> %tmp1, %tmp3 + ret <2 x i64> %tmp4 } define <8 x i16> @vsubwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind { @@ -246,8 +253,9 @@ define <8 x i16> @vsubwu8(<8 x i16>* %A, <8 x i8>* %B) nounwind { ;CHECK: vsubw.u8 %tmp1 = load <8 x i16>* %A %tmp2 = load <8 x i8>* %B - %tmp3 = call <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2) - ret <8 x i16> %tmp3 + %tmp3 = zext <8 x i8> %tmp2 to <8 x i16> + %tmp4 = sub <8 x i16> %tmp1, %tmp3 + ret <8 x i16> %tmp4 } define <4 x i32> @vsubwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind { @@ -255,8 +263,9 @@ define <4 x i32> @vsubwu16(<4 x i32>* %A, <4 x i16>* %B) nounwind { ;CHECK: vsubw.u16 %tmp1 = load <4 x i32>* %A %tmp2 = load <4 x i16>* %B - %tmp3 = call <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2) - ret <4 x i32> %tmp3 + %tmp3 = zext <4 x i16> %tmp2 to <4 x i32> + %tmp4 = sub <4 x i32> %tmp1, %tmp3 + ret <4 x i32> %tmp4 } define <2 x i64> @vsubwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind { @@ -264,14 +273,7 @@ define <2 x i64> @vsubwu32(<2 x i64>* %A, <2 x i32>* %B) nounwind { ;CHECK: vsubw.u32 %tmp1 = load <2 x i64>* %A %tmp2 = load <2 x i32>* %B - %tmp3 = call <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2) - ret <2 x i64> %tmp3 + %tmp3 = zext <2 x i32> %tmp2 to <2 x i64> + %tmp4 = sub <2 x i64> %tmp1, %tmp3 + ret <2 x i64> %tmp4 } - -declare <8 x i16> @llvm.arm.neon.vsubws.v8i16(<8 x i16>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vsubws.v4i32(<4 x i32>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vsubws.v2i64(<2 x i64>, <2 x i32>) nounwind readnone - -declare <8 x i16> @llvm.arm.neon.vsubwu.v8i16(<8 x i16>, <8 x i8>) nounwind readnone -declare <4 x i32> @llvm.arm.neon.vsubwu.v4i32(<4 x i32>, <4 x i16>) nounwind readnone -declare <2 x i64> @llvm.arm.neon.vsubwu.v2i64(<2 x i64>, <2 x i32>) nounwind readnone -- 2.34.1
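
Note on the resulting IR: a call to one of the removed intrinsics, e.g.

  %r = call <4 x i32> @llvm.arm.neon.vaddls.v4i32(<4 x i16> %a, <4 x i16> %b)

is rewritten by the auto-upgrade code above into a sign- or zero-extension of
both operands (vaddl/vsubl), or of just the second operand (vaddw/vsubw),
followed by an ordinary add/sub; the new N3VL/N3VW patterns then select that
IR back to the same NEON instructions.  A minimal sketch of the upgraded forms
(function and value names here are illustrative, not taken from the patch; the
transformation itself is the one checked in test/Bitcode/neon-intrinsics.ll):

  define <4 x i32> @vaddls16_upgraded(<4 x i16> %a, <4 x i16> %b) nounwind {
    %a.ext = sext <4 x i16> %a to <4 x i32>  ; long form: both D-sized
    %b.ext = sext <4 x i16> %b to <4 x i32>  ; operands are widened
    %r = add <4 x i32> %a.ext, %b.ext        ; selected as vaddl.s16
    ret <4 x i32> %r
  }

  define <4 x i32> @vaddwu16_upgraded(<4 x i32> %q, <4 x i16> %b) nounwind {
    %b.ext = zext <4 x i16> %b to <4 x i32>  ; wide form: only the D operand
    %r = add <4 x i32> %q, %b.ext            ; selected as vaddw.u16
    ret <4 x i32> %r
  }

Signedness is now carried by the choice of sext vs. zext rather than by
separate "s"/"u" intrinsics, so the optimizer can treat these like any other
add/sub.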