#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Target/TargetLowering.h"
namespace llvm {
namespace AArch64ISD {
// get selected.
WrapperSmall,
- // Vector bitwise select
- NEON_BSL,
-
// Vector move immediate
NEON_MOVIMM,
// Vector FP move immediate
NEON_FMOVIMM,
+ // Vector permute
+ NEON_UZP1,
+ NEON_UZP2,
+ NEON_ZIP1,
+ NEON_ZIP2,
+ NEON_TRN1,
+ NEON_TRN2,
+
+ // Vector element reverse
+ NEON_REV64,
+ NEON_REV32,
+ NEON_REV16,
+
// Vector compare
NEON_CMP,
// Vector dup by lane
NEON_VDUPLANE,
+ // Vector extract
+ NEON_VEXTRACT,
+
+ // NEON duplicate lane loads
+ NEON_LD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ NEON_LD3DUP,
+ NEON_LD4DUP,
+
// NEON loads with post-increment base updates:
- NEON_LD1_UPD = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ NEON_LD1_UPD,
NEON_LD2_UPD,
NEON_LD3_UPD,
NEON_LD4_UPD,
+ NEON_LD1x2_UPD,
+ NEON_LD1x3_UPD,
+ NEON_LD1x4_UPD,
// NEON stores with post-increment base updates:
NEON_ST1_UPD,
NEON_ST2_UPD,
NEON_ST3_UPD,
- NEON_ST4_UPD
+ NEON_ST4_UPD,
+ NEON_ST1x2_UPD,
+ NEON_ST1x3_UPD,
+ NEON_ST1x4_UPD,
+
+ // NEON duplicate lane loads with post-increment base updates:
+ NEON_LD2DUP_UPD,
+ NEON_LD3DUP_UPD,
+ NEON_LD4DUP_UPD,
+
+ // NEON lane loads with post-increment base updates:
+ NEON_LD2LN_UPD,
+ NEON_LD3LN_UPD,
+ NEON_LD4LN_UPD,
+
+ // NEON lane stores with post-increment base updates:
+ NEON_ST2LN_UPD,
+ NEON_ST3LN_UPD,
+ NEON_ST4LN_UPD
};
}
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const;
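+ /// \brief Return the desired alignment for ByVal aggregate function
+ /// arguments in the caller parameter area.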
+ unsigned getByValTypeAlignment(Type *Ty) const override;
+
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
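+ /// Lower ISD::SHL_PARTS, ISD::SRA_PARTS and ISD::SRL_PARTS nodes, which
+ /// implement shifts on types wider than the native register as operations
+ /// on a register pair.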
+ SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
+
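+ /// \brief Return true if the shuffle described by Mask on inputs V0 and V1
+ /// is equivalent to a vector concatenation; if so, the equivalent
+ /// concat_vectors node is returned in Res.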
+ bool isConcatVector(SDValue Op, SelectionDAG &DAG, SDValue V0, SDValue V1,
+ const int *Mask, SDValue &Res) const;
+
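+ /// \brief Try to decompose Op into a shuffle of two source vectors: on
+ /// success, V0 and V1 receive the sources and Mask the per-element
+ /// selection mask.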
+ bool isKnownShuffleVector(SDValue Op, SelectionDAG &DAG, SDValue &V0,
+ SDValue &V1, int *Mask) const;
+
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
const AArch64Subtarget *ST) const;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
bool isLegalICmpImmediate(int64_t Val) const;
+
+ /// \brief Return true if the addressing mode represented by AM is legal for
+ /// this target, for a load/store of the specified type.
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
+
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
+
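+ /// Return true if it is free to truncate a value of type Ty1 (or VT1) to
+ /// type Ty2 (or VT2), e.g. an i64 to i32 truncation on AArch64, which only
+ /// reads the low 32 bits of the source register.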
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
+
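+ /// Return true if a zero-extension to the second type (or value) is free;
+ /// on AArch64, 32-bit operations implicitly zero the upper 32 bits of the
+ /// destination register.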
+ bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+ bool isZExtFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
+
SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &A64cc, SelectionDAG &DAG, SDLoc &dl) const;
SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
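+ /// Lower an ISD::ConstantPool node into target-specific constant-pool
+ /// addressing.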
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
/// expanded to fmul + fadd.
virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+ /// \brief Returns true if the target allows unaligned memory accesses of
+ /// the specified type. Whether the access is "fast" is returned by
+ /// reference in the last argument.
+ virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
+ bool *Fast) const;
+
ConstraintType getConstraintType(const std::string &Constraint) const;
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info,
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
- unsigned Intrinsic) const LLVM_OVERRIDE;
+ unsigned Intrinsic) const override;
+
+protected:
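+ /// Return the largest legal super-register class for the given type,
+ /// together with the relative cost used in spill weight and register
+ /// pressure heuristics.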
+ std::pair<const TargetRegisterClass*, uint8_t>
+ findRepresentativeClass(MVT VT) const;
private:
const InstrItineraryData *Itins;