From: Tim Northover
Date: Fri, 4 Apr 2014 09:03:02 +0000 (+0000)
Subject: ARM64: add 128-bit MLA operations to the custom selection code.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=604dff27c96bc7cb22de061d737eaf2a24df9e01;p=oota-llvm.git

ARM64: add 128-bit MLA operations to the custom selection code.

Without this change, the llvm_unreachable kicked in. The code pattern
being spotted is rather non-canonical for 128-bit MLAs, but it can
happen and there's no point in generating sub-optimal code for it just
because it looks odd.

Should fix PR19332.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205615 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp b/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
index 2e234c92773..0286b31d994 100644
--- a/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
+++ b/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
@@ -431,9 +431,9 @@ static bool checkV64LaneV128(SDValue Op0, SDValue Op1, SDValue &StdOp,
   return true;
 }
 
-/// SelectMLAV64LaneV128 - ARM64 supports 64-bit vector MLAs (v4i16 and v2i32)
-/// where one multiplicand is a lane in the upper half of a 128-bit vector.
-/// Recognize and select this so that we don't emit unnecessary lane extracts.
+/// SelectMLAV64LaneV128 - ARM64 supports vector MLAs where one multiplicand is
+/// a lane in the upper half of a 128-bit vector. Recognize and select this so
+/// that we don't emit unnecessary lane extracts.
 SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
   SDValue Op0 = N->getOperand(0);
   SDValue Op1 = N->getOperand(1);
@@ -463,9 +463,15 @@ SDNode *ARM64DAGToDAGISel::SelectMLAV64LaneV128(SDNode *N) {
   case MVT::v4i16:
     MLAOpc = ARM64::MLAv4i16_indexed;
     break;
+  case MVT::v8i16:
+    MLAOpc = ARM64::MLAv8i16_indexed;
+    break;
   case MVT::v2i32:
     MLAOpc = ARM64::MLAv2i32_indexed;
     break;
+  case MVT::v4i32:
+    MLAOpc = ARM64::MLAv4i32_indexed;
+    break;
   }
 
   return CurDAG->getMachineNode(MLAOpc, SDLoc(N), N->getValueType(0), Ops);
diff --git a/test/CodeGen/ARM64/vmul.ll b/test/CodeGen/ARM64/vmul.ll
index 3ef0a76e204..d11bb2f72eb 100644
--- a/test/CodeGen/ARM64/vmul.ll
+++ b/test/CodeGen/ARM64/vmul.ll
@@ -1598,6 +1598,32 @@ entry:
   ret <2 x i32> %add
 }
 
+define <8 x i16> @not_really_vmlaq_laneq_s16_test(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s16_test
+; CHECK-NOT: ext
+; CHECK: mla.8h v0, v1, v2[5]
+; CHECK-NEXT: ret
+  %shuffle1 = shufflevector <8 x i16> %c, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle2 = shufflevector <4 x i16> %shuffle1, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <8 x i16> %shuffle2, %b
+  %add = add <8 x i16> %mul, %a
+  ret <8 x i16> %add
+}
+
+define <4 x i32> @not_really_vmlaq_laneq_s32_test(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) nounwind readnone ssp {
+entry:
+; CHECK: not_really_vmlaq_laneq_s32_test
+; CHECK-NOT: ext
+; CHECK: mla.4s v0, v1, v2[3]
+; CHECK-NEXT: ret
+  %shuffle1 = shufflevector <4 x i32> %c, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %shuffle2 = shufflevector <2 x i32> %shuffle1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %mul = mul <4 x i32> %shuffle2, %b
+  %add = add <4 x i32> %mul, %a
+  ret <4 x i32> %add
+}
+
 define <4 x i32> @vmull_laneq_s16_test(<4 x i16> %a, <8 x i16> %b) nounwind readnone ssp {
 entry:
 ; CHECK: vmull_laneq_s16_test
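---

As a source-level illustration (an assumption inferred from the test names and
CHECK lines above, not part of the commit): combining vget_high with a
lane-indexed MLA intrinsic is one plausible way for Clang to produce the
"extract the upper half, then splat a lane" shuffle pair that the new
v8i16/v4i32 cases now select.

#include <arm_neon.h>

/* Hypothetical reproducer: lane 1 of the high half of c is lane 5 of the
 * full 128-bit vector, so after this change it should select
 * "mla.8h v0, v1, v2[5]" instead of reaching the llvm_unreachable. */
int16x8_t mla_high_lane_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  return vmlaq_lane_s16(a, b, vget_high_s16(c), 1);
}

/* Same shape for the v4i32 case: lane 1 of the high half is overall lane 3,
 * matching the "mla.4s v0, v1, v2[3]" the test expects. */
int32x4_t mla_high_lane_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  return vmlaq_lane_s32(a, b, vget_high_s32(c), 1);
}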