TypeExpandFloat, // Split this float into two of half the size.
TypeScalarizeVector, // Replace this one-element vector with its element.
TypeSplitVector, // Split this vector into two of half the size.
- TypeWidenVector // This vector should be widened into a larger vector.
+ TypeWidenVector, // This vector should be widened into a larger vector.
+ TypePromoteFloat // Replace this float with a larger one.
};
/// LegalizeKind holds the legalization kind that needs to happen to EVT
/// Enum that specifies what an AtomicRMWInst is expanded to, if at all. Exists
/// because different targets have different levels of support for these
- /// atomic RMW instructions, and also have different options w.r.t. what they should
- /// expand to.
+ /// atomic RMW instructions, and also have different options w.r.t. what they
+ /// should expand to.
enum class AtomicRMWExpansionKind {
None, // Don't expand the instruction.
LLSC, // Expand the instruction into load-linked/store-conditional; used
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
+ virtual bool useSoftFloat() const { return false; }
/// Return the pointer type for the given address space, defaults to
/// the pointer type from the data layout.
/// isLoadBitCastBeneficial() - Return true if the following transform
/// is beneficial.
/// fold (conv (load x)) -> (load (conv*)x)
- /// On architectures that don't natively support some vector loads efficiently,
- /// casting the load to a smaller vector of larger types and loading
- /// is more efficient, however, this can be undone by optimizations in
+ /// On architectures that don't natively support some vector loads
+ /// efficiently, casting the load to a smaller vector of larger types and
+ /// loading is more efficient; however, this can be undone by optimizations in
/// the DAG combiner.
- virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+ virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+ EVT /* Bitcast */) const {
return true;
}
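+
+ // Example (hypothetical XYZ target; the override and the value types are
+ // assumptions): a target whose wide scalar loads are cheaper than
+ // small-element vector loads can keep (v8i8 (bitcast (i64 (load x))))
+ // rather than let the combiner form a direct (v8i8 (load x)):
+ //
+ //   bool XYZTargetLowering::isLoadBitCastBeneficial(EVT Load,
+ //                                                   EVT Bitcast) const {
+ //     return !(Load == MVT::i64 && Bitcast == MVT::v8i8);
+ //   }
+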
+ /// Return true if it is expected to be cheaper to do a store of a non-zero
+ /// vector constant with the given size and type for the address space than to
+ /// store the individual scalar element constants.
+ virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
+ unsigned NumElem,
+ unsigned AddrSpace) const {
+ return false;
+ }
+
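+ // Example (hypothetical XYZ target; the heuristic is an assumption): storing
+ // <4 x i32> <i32 1, i32 2, i32 3, i32 4> as one vector store instead of four
+ // scalar i32 stores could be reported as cheap like so:
+ //
+ //   bool XYZTargetLowering::storeOfVectorConstantIsCheap(
+ //       EVT MemVT, unsigned NumElem, unsigned AddrSpace) const {
+ //     return NumElem > 2; // One wide store beats 3+ scalar stores.
+ //   }
+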
/// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
virtual bool isCheapToSpeculateCttz() const {
return false;
}
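+
+ // For example, answering true lets transforms speculate a cttz out of a
+ // zero-check branch, rewriting (in illustrative C)
+ //   r = (x != 0) ? cttz(x) : 32;
+ // into a straight-line cttz when the instruction is safe and cheap to
+ // execute unconditionally.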
-
+
/// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool isCheapToSpeculateCtlz() const {
return false;
/// Return how this load with extension should be treated: either it is legal,
/// needs to be promoted to a larger size, needs to be expanded to some other
/// code sequence, or the target has a custom expander for it.
- LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+ LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+ EVT MemVT) const {
if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
return false;
}
+ /// Return true if the pointer arguments to CI should be aligned by aligning
+ /// the object whose address is being passed. If so then MinSize is set to the
+ /// minimum size the object must be to be aligned and PrefAlign is set to the
+ /// preferred alignment.
+ virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+ unsigned & /*PrefAlign*/) const {
+ return false;
+ }
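+
+ // Example (hypothetical XYZ target; the memcpy check and the numbers are
+ // assumptions): request that objects of at least 8 bytes whose address is
+ // passed to a memcpy be aligned to a preferred 4-byte boundary.
+ //
+ //   bool XYZTargetLowering::shouldAlignPointerArgs(CallInst *CI,
+ //                                                  unsigned &MinSize,
+ //                                                  unsigned &PrefAlign) const {
+ //     if (!isa<MemCpyInst>(CI))
+ //       return false;
+ //     MinSize = 8;
+ //     PrefAlign = 4;
+ //     return true;
+ //   }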
+
//===--------------------------------------------------------------------===//
/// \name Helpers for TargetTransformInfo implementations
/// @{
/// seq_cst. But if they are lowered to monotonic accesses, no amount of
/// IR-level fences can prevent it.
/// @{
- virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
- bool IsStore, bool IsLoad) const {
+ virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
return nullptr;
}
- virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
- bool IsStore, bool IsLoad) const {
+ virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
if (!getInsertFencesForAtomic())
return nullptr;
return false;
}
+ /// Returns true if arguments should be sign-extended in lib calls.
+ virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+ return IsSigned;
+ }
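+
+ // Example (hypothetical XYZ target; the ABI rule is an assumption): an ABI
+ // that keeps i32 values sign-extended in 64-bit registers would extend them
+ // in libcalls regardless of the signedness of the value itself:
+ //
+ //   bool XYZTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
+ //                                                         bool IsSigned) const {
+ //     if (Type == MVT::i32)
+ //       return true; // ABI requires i32 arguments to be sign-extended.
+ //     return IsSigned;
+ //   }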
+
/// Returns true if the given (atomic) load should be expanded by the
/// IR-level AtomicExpand pass into a load-linked instruction
/// (through emitLoadLinked()).
/// it succeeds, and nullptr otherwise.
/// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
/// another round of expansion.
- virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+ virtual LoadInst *
+ lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
return nullptr;
}
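+
+ // Illustrative example: an idempotent RMW such as
+ //   atomicrmw or i32* %ptr, i32 0 seq_cst
+ // leaves memory unchanged, so on some targets it can be lowered to a
+ // suitably fenced atomic load of %ptr instead of a full
+ // read-modify-write sequence.
+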
/// load/store.
virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
SmallVectorImpl<Value*> &/*Ops*/,
- Type *&/*AccessTy*/) const {
+ Type *&/*AccessTy*/,
+ unsigned AddrSpace = 0) const {
return false;
}
/// The type may be VoidTy, in which case only return true if the addressing
/// mode is legal for a load/store of any legal type. TODO: Handle
/// pre/postinc as well.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+ ///
+ /// If the address space cannot be determined, it will be -1.
+ ///
+ /// TODO: Remove default argument
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty,
+ unsigned AddrSpace) const;
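+
+ // For reference, AddrMode describes addresses of the general form
+ //   BaseGV + BaseOffs + BaseReg + Scale * ScaleReg,
+ // so an x86-style address such as [%base + 4*%idx + 16] is queried with
+ // AM.HasBaseReg = true, AM.Scale = 4 and AM.BaseOffs = 16.
+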
/// \brief Return the cost of the scaling factor used in the addressing mode
/// represented by AM for this target, for a load/store of the specified type.
/// If the AM is supported, the return value must be >= 0.
/// If the AM is not supported, it returns a negative value.
/// TODO: Handle pre/postinc as well.
- virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
+ /// TODO: Remove default argument
+ virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty,
+ unsigned AS = 0) const {
// Default: assume that any scaling factor used in a legal AM is free.
- if (isLegalAddressingMode(AM, Ty)) return 0;
+ if (isLegalAddressingMode(AM, Ty, AS))
+ return 0;
return -1;
}
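+
+ // Example (hypothetical XYZ target; the cost model is an assumption): treat
+ // scale factors of 0, 1 and 2 as free and any other legal scale as costing
+ // one extra instruction:
+ //
+ //   int XYZTargetLowering::getScalingFactorCost(const AddrMode &AM, Type *Ty,
+ //                                               unsigned AS) const {
+ //     if (!isLegalAddressingMode(AM, Ty, AS))
+ //       return -1; // Not supported at all.
+ //     return (AM.Scale == 0 || AM.Scale == 1 || AM.Scale == 2) ? 0 : 1;
+ //   }
+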
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
- MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
};
/// This class defines information used to lower LLVM code to legal SelectionDAG
ISD::CondCode &CCCode, SDLoc DL) const;
/// Returns a pair of (return value, chain).
+ /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
EVT RetVT, const SDValue *Ops,
unsigned NumOps, bool isSigned,
/// outgoing token chain. It calls LowerCall to do the actual lowering.
std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
- /// This hook must be implemented to lower calls into the the specified
+ /// This hook must be implemented to lower calls into the specified
/// DAG. The outgoing arguments to the call are described by the Outs array,
/// and the values to be returned by the call are described by the Ins
/// array. The implementation should fill in the InVals array with legal-type
virtual unsigned
getInlineAsmMemConstraint(const std::string &ConstraintCode) const {
- // FIXME: This currently maps all constraints to the the same code.
- // This will be corrected once all targets are updated.
- return InlineAsm::Constraint_m;
+ if (ConstraintCode == "i")
+ return InlineAsm::Constraint_i;
+ else if (ConstraintCode == "m")
+ return InlineAsm::Constraint_m;
+ return InlineAsm::Constraint_Unknown;
}
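+
+ // For example, inline assembly such as (illustrative IR)
+ //   call void asm "movl $0, %eax", "*m"(i32* %p)
+ // presents the memory constraint code "m" to this hook, which maps it to
+ // InlineAsm::Constraint_m; targets with additional memory constraint codes
+ // override this hook to map those as well.
+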
/// Try to replace an X constraint, which matches anything, with another that
/// Hooks for building estimates in place of slower divisions and square
/// roots.
-
+
/// Return a reciprocal square root estimate value for the input operand.
/// The RefinementSteps output is the number of Newton-Raphson refinement
/// iterations required to generate a sufficient (though not necessarily
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getRsqrtEstimate(SDValue Operand,
- DAGCombinerInfo &DCI,
- unsigned &RefinementSteps,
- bool &UseOneConstNR) const {
+ virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const {
return SDValue();
}
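+
+ // For reference, each Newton-Raphson refinement step for 1/sqrt(A) computes
+ //   X_next = X * (1.5 - 0.5 * A * X * X)
+ // and roughly doubles the number of accurate bits, so RefinementSteps
+ // depends on the precision of the hardware estimate instruction.
+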
/// If that's true, then return '0' as the number of RefinementSteps to avoid
/// any further refinement of the estimate.
/// An empty SDValue return means no estimate sequence can be created.
- virtual SDValue getRecipEstimate(SDValue Operand,
- DAGCombinerInfo &DCI,
+ virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
unsigned &RefinementSteps) const {
return SDValue();
}
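+
+ // For reference, each Newton-Raphson refinement step for 1/A computes
+ //   X_next = X * (2.0 - A * X)
+ // again roughly doubling the number of accurate bits per iteration.
+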
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI);
-} // end llvm namespace
+} // namespace llvm
#endif