//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI pass --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// AArch64 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target-independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeAArch64TTIPass(PassRegistry &);
}

namespace {

class AArch64TTI final : public ImmutablePass, public TargetTransformInfo {
  const AArch64TargetMachine *TM;
  const AArch64Subtarget *ST;
  const AArch64TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  enum MemIntrinsicType {
    VECTOR_LDST_TWO_ELEMENTS,
    VECTOR_LDST_THREE_ELEMENTS,
    VECTOR_LDST_FOUR_ELEMENTS
  };

public:
  AArch64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  AArch64TTI(const AArch64TargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
    initializeAArch64TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override { pushTTIStack(this); }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo *)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  unsigned getIntImmCost(int64_t Val) const;
  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override {
    if (Vector)
      return ST->hasNEON() ? 32 : 0;
    return 31;
  }

  unsigned getRegisterBitWidth(bool Vector) const override {
    if (Vector)
      return ST->hasNEON() ? 128 : 0;
    return 64;
  }

  unsigned getMaxInterleaveFactor() const override;

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None) const override;

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const override;

  void getUnrollingPreferences(const Function *F, Loop *L,
                               UnrollingPreferences &UP) const override;

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const override;

  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(AArch64TTI, TargetTransformInfo, "aarch64tti",
                   "AArch64 Target Transform Info", true, true, false)
char AArch64TTI::ID = 0;

ImmutablePass *
llvm::createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM) {
  return new AArch64TTI(TM);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned AArch64TTI::getIntImmCost(int64_t Val) const {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
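// For example, Val = 0x0000123456789ABC has 19 leading zero bits, so the cost
// above is (64 - 19 + 15) / 16 = 3: one MOVZ plus two MOVKs, one per non-zero
// 16-bit chunk.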

/// \brief Calculate the cost of materializing the given constant.
unsigned AArch64TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
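// E.g. an i128 constant is costed as two 64-bit chunks through the helper
// above; a chunk that is zero or a valid logical immediate contributes
// nothing, and the result is clamped so at least one instruction is counted.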

unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free) : Cost;
  }
  return AArch64TTI::getIntImmCost(Imm, Ty);
}
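// For instance, in "and i64 %x, 0xF0F0F0F0F0F0F0F0" the mask is a valid
// logical immediate, so its cost (1) is within NumConstants * TCC_Basic and
// it is reported as TCC_Free above: constant hoisting leaves it alone and it
// gets encoded directly into the AND instruction.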

unsigned AArch64TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default:
    return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      unsigned NumConstants = (BitSize + 63) / 64;
      unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TCC_Basic)
                 ? static_cast<unsigned>(TCC_Free) : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return AArch64TTI::getIntImmCost(Imm, Ty);
}
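// Note on the cases above: a stackmap's first two operands (ID, shadow size)
// and a patchpoint's first four (ID, size, target, argument count), as well
// as any immediate fitting in 64 bits, are encoded directly into the record,
// so hoisting such constants would gain nothing.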

AArch64TTI::PopcntSupportKind
AArch64TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return PSK_Software;
}
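// Scalar popcount is lowered through the vector unit, roughly an fmov into a
// NEON register, CNT over the bytes, a horizontal add, and a move back;
// that short sequence is still fast enough to report PSK_FastHardware for
// 32- and 64-bit widths above.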

unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                      Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 },
  };

  int Idx = ConvertCostTableLookup<MVT>(
      ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
      SrcTy.getSimpleVT());
  if (Idx != -1)
    return ConversionTbl[Idx].Cost;

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
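// Example from the table above: sitofp <4 x i16> to <4 x float> is costed 2,
// matching a widen to v4i32 (sshll) followed by a single scvtf.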

unsigned AArch64TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return 2;
}
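// Lane 0 is special because a scalar can be placed there by a simple
// scalar-to-vector move that is often folded away entirely; every other lane
// needs an explicit INS (insert) or UMOV (extract), hence the nonzero cost
// above.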

unsigned AArch64TTI::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
    OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                               TargetTransformInfo::OP_None,
                               TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return TargetTransformInfo::getArithmeticInstrCost(
        Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}
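// Approximate expansion costed above, for a signed divide of w0 by 4:
//   add  w8, w0, #3        (ADD)
//   cmp  w0, #0            (CMP)
//   csel w8, w8, w0, lt    (SELECT)
//   asr  w0, w8, #2        (SRA)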

unsigned AArch64TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

unsigned AArch64TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) const {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    unsigned AmortizationCost = 20;
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(CondTy);
    EVT SelValTy = TLI->getValueType(ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx =
          ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
                                 SelValTy.getSimpleVT());
      if (Idx != -1)
        return VectorSelectTbl[Idx].Cost;
    }
  }
  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned AArch64TTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has shown in practice
    // on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    unsigned AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
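// E.g. a <4 x i8> access is reported above as (4 * 2) * 4 * 2 = 64,
// effectively steering the vectorizers away from this width unless a large
// amount of surrounding work is vectorized along with it.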

unsigned AArch64TTI::getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const {
  unsigned Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}
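// Under AAPCS64 only the low 64 bits of v8-v15 are callee-saved, so a 128-bit
// vector that is live across a call generally has to be spilled and reloaded;
// the store-plus-load cost above models that round trip.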

unsigned AArch64TTI::getMaxInterleaveFactor() const {
  if (ST->isCortexA57())
    return 4;
  return 2;
}

void AArch64TTI::getUnrollingPreferences(const Function *F, Loop *L,
                                         UnrollingPreferences &UP) const {
  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

Value *AArch64TTI::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                     Type *ExpectedType) const {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // The result must be a struct whose element types match the values being
    // stored.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
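// This hook lets passes such as EarlyCSE forward the values written by an
// stN intrinsic to a later matching ldN of the same pointer, rebuilding the
// ldN's struct result directly from the stored operands instead of reloading
// from memory.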

bool AArch64TTI::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}
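// The MatchingId set above lets clients pair an ldN with the corresponding
// stN of the same element count when forwarding or eliminating these
// interleaved accesses.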