1 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file describes how to lower LLVM code to machine code. This has three main parts:
14 /// 1. Which ValueTypes are natively supported by the target.
15 /// 2. Which operations are supported for supported ValueTypes.
16 /// 3. Cost thresholds for alternative implementations of certain operations.
18 /// In addition it has a few other components, like information about FP immediates.
21 //===----------------------------------------------------------------------===//
23 #ifndef LLVM_TARGET_TARGETLOWERING_H
24 #define LLVM_TARGET_TARGETLOWERING_H
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/CodeGen/DAGCombine.h"
28 #include "llvm/CodeGen/RuntimeLibcalls.h"
29 #include "llvm/CodeGen/SelectionDAGNodes.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/Target/TargetCallingConv.h"
37 #include "llvm/Target/TargetMachine.h"
46 class FunctionLoweringInfo;
47 class ImmutableCallSite;
49 class MachineBasicBlock;
50 class MachineFunction;
52 class MachineJumpTableInfo;
57 template<typename T> class SmallVectorImpl;
59 class TargetRegisterClass;
60 class TargetLibraryInfo;
61 class TargetLoweringObjectFile;
66 None, // No preference
67 Source, // Follow source order.
68 RegPressure, // Scheduling for lowest register pressure.
69 Hybrid, // Scheduling for both latency and register pressure.
70 ILP, // Scheduling for ILP in low register pressure mode.
71 VLIW // Scheduling for VLIW targets.
75 /// This base class for TargetLowering contains the SelectionDAG-independent
76 /// parts that can be used from the rest of CodeGen.
77 class TargetLoweringBase {
78 TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
79 void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
82 /// This enum indicates whether operations are valid for a target, and if not,
83 /// what action should be used to make them valid.
85 Legal, // The target natively supports this operation.
86 Promote, // This operation should be executed in a larger type.
87 Expand, // Try to expand this to other ops, otherwise use a libcall.
88 Custom // Use the LowerOperation hook to implement custom lowering.
91 /// This enum indicates whether types are legal for a target, and if not,
92 /// what action should be used to make them valid.
93 enum LegalizeTypeAction {
94 TypeLegal, // The target natively supports this type.
95 TypePromoteInteger, // Replace this integer with a larger one.
96 TypeExpandInteger, // Split this integer into two of half the size.
97 TypeSoftenFloat, // Convert this float to an integer type of the same size.
98 TypeExpandFloat, // Split this float into two of half the size.
99 TypeScalarizeVector, // Replace this one-element vector with its element.
100 TypeSplitVector, // Split this vector into two of half the size.
101 TypeWidenVector // This vector should be widened into a larger vector.
104 /// LegalizeKind holds the legalization kind that needs to happen to EVT
105 /// in order to type-legalize it.
106 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
108 /// Enum that describes how the target represents true/false values.
109 enum BooleanContent {
110 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
111 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
112 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
115 /// Enum that describes what type of support for selects the target has.
116 enum SelectSupportKind {
117 ScalarValSelect, // The target supports scalar selects (ex: cmov).
118 ScalarCondVectorVal, // The target supports selects with a scalar condition
119 // and vector values (ex: cmov).
120 VectorMaskSelect // The target supports vector selects with a vector
121 // mask (ex: x86 blends).
124 static ISD::NodeType getExtendForContent(BooleanContent Content) {
126 case UndefinedBooleanContent:
127 // Extend by adding rubbish bits.
128 return ISD::ANY_EXTEND;
129 case ZeroOrOneBooleanContent:
130 // Extend by adding zero bits.
131 return ISD::ZERO_EXTEND;
132 case ZeroOrNegativeOneBooleanContent:
133 // Extend by copying the sign bit.
134 return ISD::SIGN_EXTEND;
136 llvm_unreachable("Invalid content kind");
139 /// NOTE: The constructor takes ownership of TLOF.
140 explicit TargetLoweringBase(const TargetMachine &TM,
141 const TargetLoweringObjectFile *TLOF);
142 virtual ~TargetLoweringBase();
145 /// \brief Initialize all of the actions to default values.
149 const TargetMachine &getTargetMachine() const { return TM; }
150 const DataLayout *getDataLayout() const { return DL; }
151 const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
153 bool isBigEndian() const { return !IsLittleEndian; }
154 bool isLittleEndian() const { return IsLittleEndian; }
156 /// Return the pointer type for the given address space, defaults to
157 /// the pointer type from the data layout.
158 /// FIXME: The default needs to be removed once all the code is updated.
159 virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
160 unsigned getPointerSizeInBits(uint32_t AS = 0) const;
161 unsigned getPointerTypeSizeInBits(Type *Ty) const;
162 virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;
164 EVT getShiftAmountTy(EVT LHSTy) const;
166 /// Returns the type to be used for the index operand of:
167 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
168 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
169 virtual MVT getVectorIdxTy() const {
170 return getPointerTy();
173 /// Return true if the select operation is expensive for this target.
174 bool isSelectExpensive() const { return SelectIsExpensive; }
176 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
180 /// Return true if multiple condition registers are available.
181 bool hasMultipleConditionRegisters() const {
182 return HasMultipleConditionRegisters;
185 /// Return true if the target has BitExtract instructions.
186 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
188 /// Return the preferred vector type legalization action.
189 virtual TargetLoweringBase::LegalizeTypeAction
190 getPreferredVectorAction(EVT VT) const {
191 // The default action for one element vectors is to scalarize
192 if (VT.getVectorNumElements() == 1)
193 return TypeScalarizeVector;
194 // The default action for other vectors is to promote
195 return TypePromoteInteger;
198 // There are two general methods for expanding a BUILD_VECTOR node:
199 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle them together.
201 // 2. Build the vector on the stack and then load it.
202 // If this function returns true, then method (1) will be used, subject to
203 // the constraint that all of the necessary shuffles are legal (as determined
204 // by isShuffleMaskLegal). If this function returns false, then method (2) is
205 // always used. The vector type, and the number of defined values, are provided.
208 virtual bool shouldExpandBuildVectorWithShuffles(EVT /* VT */,
209 unsigned DefinedValues) const {
210 return DefinedValues < 3;
213 /// Return true if integer divide is usually cheaper than a sequence of
214 /// several shifts, adds, and multiplies for this target.
215 bool isIntDivCheap() const { return IntDivIsCheap; }
217 /// Returns true if the target has indicated that at least one type should be bypassed.
218 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
220 /// Returns the map of slow types for division or remainder with their corresponding fast types.
222 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
223 return BypassSlowDivWidths;
226 /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
227 bool isPow2DivCheap() const { return Pow2DivIsCheap; }
229 /// Return true if flow control is an expensive operation that should be avoided.
231 bool isJumpExpensive() const { return JumpIsExpensive; }
233 /// Return true if selects are only cheaper than branches if the branch is
234 /// unlikely to be predicted right.
235 bool isPredictableSelectExpensive() const {
236 return PredictableSelectIsExpensive;
239 /// isLoadBitCastBeneficial() - Return true if the following transform is profitable:
241 /// fold (conv (load x)) -> (load (conv*)x)
242 /// On architectures that don't natively support some vector loads efficiently,
243 /// casting the load to a smaller vector of larger types and loading
244 /// is more efficient; however, this can be undone by optimizations in the DAG combiner.
246 virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
250 /// \brief Return true if the target supports combining a chain of code like:
253 /// %andResult = and %val1, #imm-with-one-bit-set;
254 /// %icmpResult = icmp %andResult, 0
255 /// br i1 %icmpResult, label %dest1, label %dest2
257 /// into a single machine instruction of a form like:
259 /// brOnBitSet %register, #bitNumber, dest
261 bool isMaskAndBranchFoldingLegal() const {
262 return MaskAndBranchFoldingIsLegal;
265 /// Return the ValueType of the result of SETCC operations. Also used to
266 /// obtain the target's preferred type for the condition operand of SELECT and
267 /// BRCOND nodes. In the case of BRCOND the argument passed is MVT::Other
268 /// since there are no other operands to get a type hint from.
269 virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
271 /// Return the ValueType for comparison libcalls. Comparison libcalls include
272 /// floating point comparison calls, and Ordered/Unordered check calls on
273 /// floating point numbers.
275 MVT::SimpleValueType getCmpLibcallReturnType() const;
277 /// For targets without i1 registers, this gives the nature of the high-bits
278 /// of boolean values held in types wider than i1.
280 /// "Boolean values" are special true/false values produced by nodes like
281 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
282 /// Not to be confused with general values promoted from i1. Some cpus
283 /// distinguish between vectors of boolean and scalars; the isVec parameter
284 /// selects between the two kinds. For example on X86 a scalar boolean should
285 /// be zero extended from i1, while the elements of a vector of booleans
286 /// should be sign extended from i1.
288 /// Some cpus also treat floating point types the same way as they treat
289 /// vectors instead of the way they treat scalars.
290 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
292 if (isVec) return BooleanVectorContents;
293 return isFloat ? BooleanFloatContents : BooleanContents;
296 BooleanContent getBooleanContents(EVT Type) const {
297 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
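  // For illustration only: target-independent DAG code commonly pairs
  // getBooleanContents with getExtendForContent to widen a boolean value in a
  // way the target expects. DAG, dl, VT and BoolVal below are assumed to come
  // from the surrounding code; this is a sketch, not a prescribed pattern.
  //
  //   ISD::NodeType ExtOp = getExtendForContent(getBooleanContents(VT));
  //   SDValue WideBool = DAG.getNode(ExtOp, dl, VT, BoolVal);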
300 /// Return target scheduling preference.
301 Sched::Preference getSchedulingPreference() const {
302 return SchedPreferenceInfo;
305 /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
306 /// for different nodes. This function returns the preference (or none) for the given node.
308 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
312 /// Return the register class that should be used for the specified value type.
314 virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
315 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
316 assert(RC && "This value type is not natively supported!");
320 /// Return the 'representative' register class for the specified value type.
323 /// The 'representative' register class is the largest legal super-reg
324 /// register class for the register class of the value type. For example, on
325 /// i386 the rep register class for i8, i16, and i32 is GR32; on x86_64 the rep
326 /// register class is GR64.
327 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
328 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
332 /// Return the cost of the 'representative' register class for the specified value type.
334 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
335 return RepRegClassCostForVT[VT.SimpleTy];
338 /// Return true if the target has native support for the specified value type.
339 /// This means that it has a register that directly holds it without
340 /// promotions or expansions.
341 bool isTypeLegal(EVT VT) const {
342 assert(!VT.isSimple() ||
343 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
344 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
347 class ValueTypeActionImpl {
348 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
349 /// that indicates how instruction selection should deal with the type.
350 uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
353 ValueTypeActionImpl() {
354 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
357 LegalizeTypeAction getTypeAction(MVT VT) const {
358 return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
361 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
362 unsigned I = VT.SimpleTy;
363 ValueTypeActions[I] = Action;
367 const ValueTypeActionImpl &getValueTypeActions() const {
368 return ValueTypeActions;
371 /// Return how we should legalize values of this type, either it is already
372 /// legal (return 'Legal') or we need to promote it to a larger type (return
373 /// 'Promote'), or we need to expand it into multiple registers of smaller
374 /// integer type (return 'Expand'). 'Custom' is not an option.
375 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
376 return getTypeConversion(Context, VT).first;
378 LegalizeTypeAction getTypeAction(MVT VT) const {
379 return ValueTypeActions.getTypeAction(VT);
382 /// For types supported by the target, this is an identity function. For
383 /// types that must be promoted to larger types, this returns the larger type
384 /// to promote to. For integer types that are larger than the largest integer
385 /// register, this contains one step in the expansion to get to the smaller
386 /// register. For illegal floating point types, this returns the integer type to transform to.
388 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
389 return getTypeConversion(Context, VT).second;
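  // A minimal sketch of how these hooks are typically used together: repeatedly
  // transform an illegal type until a legal one is reached (for example, i64
  // becomes i32 pieces on a 32-bit target). Context and VT are assumed to be in
  // scope.
  //
  //   EVT LegalVT = VT;
  //   while (getTypeAction(Context, LegalVT) != TypeLegal)
  //     LegalVT = getTypeToTransformTo(Context, LegalVT);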
392 /// For types supported by the target, this is an identity function. For
393 /// types that must be expanded (i.e. integer types that are larger than the
394 /// largest integer register or illegal floating point types), this returns
395 /// the largest legal type it will be expanded to.
396 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
397 assert(!VT.isVector());
399 switch (getTypeAction(Context, VT)) {
402 case TypeExpandInteger:
403 VT = getTypeToTransformTo(Context, VT);
406 llvm_unreachable("Type is not legal nor is it to be expanded!");
411 /// Vector types are broken down into some number of legal first class types.
412 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
413 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
414 /// turns into 4 EVT::i32 values with both PPC and X86.
416 /// This method returns the number of registers needed, and the VT for each
417 /// register. It also returns the VT and quantity of the intermediate values
418 /// before they are promoted/expanded.
419 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
421 unsigned &NumIntermediates,
422 MVT &RegisterVT) const;
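  // Usage sketch (assuming a target where v4f32 is legal but v8f32 is not, as
  // in the SSE1 example above; Context is assumed to be in scope):
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(Context, MVT::v8f32, IntermediateVT,
  //                                             NumIntermediates, RegisterVT);
  //   // Here NumRegs == 2 and RegisterVT == MVT::v4f32.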
424 struct IntrinsicInfo {
425 unsigned opc; // target opcode
426 EVT memVT; // memory VT
427 const Value* ptrVal; // value representing memory location
428 int offset; // offset off of ptrVal
429 unsigned align; // alignment
430 bool vol; // is volatile?
431 bool readMem; // reads memory?
432 bool writeMem; // writes memory?
435 /// Given an intrinsic, checks if on the target the intrinsic will need to map
436 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
437 /// true and stores the intrinsic information into the IntrinsicInfo that was
438 /// passed to the function.
439 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
440 unsigned /*Intrinsic*/) const {
444 /// Returns true if the target can instruction select the specified FP
445 /// immediate natively. If false, the legalizer will materialize the FP
446 /// immediate as a load from a constant pool.
447 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
451 /// Targets can use this to indicate that they only support *some*
452 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
453 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be legal.
455 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
460 /// Returns true if the operation can trap for the value type.
462 /// VT must be a legal type. By default, we optimistically assume most
463 /// operations don't trap except for divide and remainder.
464 virtual bool canOpTrap(unsigned Op, EVT VT) const;
466 /// Similar to isShuffleMaskLegal. Targets can use this to
467 /// indicate whether there is a suitable VECTOR_SHUFFLE that can be used to replace
468 /// a VAND with a constant pool entry.
469 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
474 /// Return how this operation should be treated: either it is legal, needs to
475 /// be promoted to a larger size, needs to be expanded to some other code
476 /// sequence, or the target has a custom expander for it.
477 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
478 if (VT.isExtended()) return Expand;
479 // If a target-specific SDNode requires legalization, require the target
480 // to provide custom legalization for it.
481 if (Op > array_lengthof(OpActions[0])) return Custom;
482 unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
483 return (LegalizeAction)OpActions[I][Op];
486 /// Return true if the specified operation is legal on this target or can be
487 /// made legal with custom lowering. This is used to help guide high-level
488 /// lowering decisions.
489 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
490 return (VT == MVT::Other || isTypeLegal(VT)) &&
491 (getOperationAction(Op, VT) == Legal ||
492 getOperationAction(Op, VT) == Custom);
495 /// Return true if the specified operation is legal on this target or can be
496 /// made legal using promotion. This is used to help guide high-level lowering decisions.
498 bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
499 return (VT == MVT::Other || isTypeLegal(VT)) &&
500 (getOperationAction(Op, VT) == Legal ||
501 getOperationAction(Op, VT) == Promote);
504 /// Return true if the specified operation is illegal on this target or
505 /// unlikely to be made legal with custom lowering. This is used to help guide
506 /// high-level lowering decisions.
507 bool isOperationExpand(unsigned Op, EVT VT) const {
508 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
511 /// Return true if the specified operation is legal on this target.
512 bool isOperationLegal(unsigned Op, EVT VT) const {
513 return (VT == MVT::Other || isTypeLegal(VT)) &&
514 getOperationAction(Op, VT) == Legal;
517 /// Return how this load with extension should be treated: either it is legal,
518 /// needs to be promoted to a larger size, needs to be expanded to some other
519 /// code sequence, or the target has a custom expander for it.
520 LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
521 assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
522 "Table isn't big enough!");
523 return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
526 /// Return true if the specified load with extension is legal on this target.
527 bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
528 return VT.isSimple() &&
529 getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
532 /// Return how this store with truncation should be treated: either it is
533 /// legal, needs to be promoted to a larger size, needs to be expanded to some
534 /// other code sequence, or the target has a custom expander for it.
535 LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
536 assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
537 "Table isn't big enough!");
538 return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy];
542 /// Return true if the specified store with truncation is legal on this target.
544 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
545 return isTypeLegal(ValVT) && MemVT.isSimple() &&
546 getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
549 /// Return how the indexed load should be treated: either it is legal, needs
550 /// to be promoted to a larger size, needs to be expanded to some other code
551 /// sequence, or the target has a custom expander for it.
553 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
554 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
555 "Table isn't big enough!");
556 unsigned Ty = (unsigned)VT.SimpleTy;
557 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
560 /// Return true if the specified indexed load is legal on this target.
561 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
562 return VT.isSimple() &&
563 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
564 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
567 /// Return how the indexed store should be treated: either it is legal, needs
568 /// to be promoted to a larger size, needs to be expanded to some other code
569 /// sequence, or the target has a custom expander for it.
571 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
572 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
573 "Table isn't big enough!");
574 unsigned Ty = (unsigned)VT.SimpleTy;
575 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
578 /// Return true if the specified indexed store is legal on this target.
579 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
580 return VT.isSimple() &&
581 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
582 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
585 /// Return how the condition code should be treated: either it is legal, needs
586 /// to be expanded to some other code sequence, or the target has a custom expander for it.
589 LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const {
590 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
591 ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
592 "Table isn't big enough!");
593 // See setCondCodeAction for how this is encoded.
594 uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
595 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
596 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
597 assert(Action != Promote && "Can't promote condition code!");
601 /// Return true if the specified condition code is legal on this target.
602 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
604 return getCondCodeAction(CC, VT) == Legal ||
605 getCondCodeAction(CC, VT) == Custom;
609 /// If the action for this operation is to promote, this method returns the
610 /// ValueType to promote to.
611 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
612 assert(getOperationAction(Op, VT) == Promote &&
613 "This operation isn't promoted!");
615 // See if this has an explicit type specified.
616 std::map<std::pair<unsigned, MVT::SimpleValueType>,
617 MVT::SimpleValueType>::const_iterator PTTI =
618 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
619 if (PTTI != PromoteToType.end()) return PTTI->second;
621 assert((VT.isInteger() || VT.isFloatingPoint()) &&
622 "Cannot autopromote this type, add it with AddPromotedToType.");
626 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
627 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
628 "Didn't find type to promote to!");
629 } while (!isTypeLegal(NVT) ||
630 getOperationAction(Op, NVT) == Promote);
634 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
635 /// operations except for the pointer size. If AllowUnknown is true, this
636 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
637 /// otherwise it will assert.
638 EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
639 // Lower scalar pointers to native pointer types.
640 if (PointerType *PTy = dyn_cast<PointerType>(Ty))
641 return getPointerTy(PTy->getAddressSpace());
643 if (Ty->isVectorTy()) {
644 VectorType *VTy = cast<VectorType>(Ty);
645 Type *Elm = VTy->getElementType();
646 // Lower vectors of pointers to native pointer types.
647 if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
648 EVT PointerTy(getPointerTy(PT->getAddressSpace()));
649 Elm = PointerTy.getTypeForEVT(Ty->getContext());
652 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
653 VTy->getNumElements());
655 return EVT::getEVT(Ty, AllowUnknown);
658 /// Return the MVT corresponding to this LLVM type. See getValueType.
659 MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
660 return getValueType(Ty, AllowUnknown).getSimpleVT();
663 /// Return the desired alignment for ByVal or InAlloca aggregate function
664 /// arguments in the caller parameter area. This is the actual alignment, not its logarithm.
666 virtual unsigned getByValTypeAlignment(Type *Ty) const;
668 /// Return the type of registers that this ValueType will eventually require.
669 MVT getRegisterType(MVT VT) const {
670 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
671 return RegisterTypeForVT[VT.SimpleTy];
674 /// Return the type of registers that this ValueType will eventually require.
675 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
677 assert((unsigned)VT.getSimpleVT().SimpleTy <
678 array_lengthof(RegisterTypeForVT));
679 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
684 unsigned NumIntermediates;
685 (void)getVectorTypeBreakdown(Context, VT, VT1,
686 NumIntermediates, RegisterVT);
689 if (VT.isInteger()) {
690 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
692 llvm_unreachable("Unsupported extended type!");
695 /// Return the number of registers that this ValueType will eventually require.
698 /// This is one for any types promoted to live in larger registers, but may be
699 /// more than one for types (like i64) that are split into pieces. For types
700 /// like i140, which are first promoted then expanded, it is the number of
701 /// registers needed to hold all the bits of the original type. For an i140
702 /// on a 32 bit machine this means 5 registers.
703 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
705 assert((unsigned)VT.getSimpleVT().SimpleTy <
706 array_lengthof(NumRegistersForVT));
707 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
712 unsigned NumIntermediates;
713 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
715 if (VT.isInteger()) {
716 unsigned BitWidth = VT.getSizeInBits();
717 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
718 return (BitWidth + RegWidth - 1) / RegWidth;
720 llvm_unreachable("Unsupported extended type!");
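  // Worked sketch matching the i140 example above, this time for i64 on a
  // 32-bit target (Context assumed to be in scope): the value is split into
  // two pieces, each held in an i32 register.
  //
  //   MVT RegVT  = getRegisterType(Context, MVT::i64); // MVT::i32
  //   unsigned N = getNumRegisters(Context, MVT::i64); // 2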
723 /// If true, then instruction selection should seek to shrink the FP constant
724 /// of the specified type to a smaller type in order to save space and / or reduce runtime.
726 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
728 /// When splitting a value of the specified type into parts, does the Lo
729 /// or Hi part come first? This usually follows the endianness, except
730 /// for ppcf128, where the Hi part always comes first.
731 bool hasBigEndianPartOrdering(EVT VT) const {
732 return isBigEndian() || VT == MVT::ppcf128;
735 /// If true, the target has custom DAG combine transformations that it can
736 /// perform for the specified node.
737 bool hasTargetDAGCombine(ISD::NodeType NT) const {
738 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
739 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
742 /// \brief Get maximum # of store operations permitted for llvm.memset
744 /// This function returns the maximum number of store operations permitted
745 /// to replace a call to llvm.memset. The value is set by the target at the
746 /// performance threshold for such a replacement. If OptSize is true,
747 /// return the limit for functions that have OptSize attribute.
748 unsigned getMaxStoresPerMemset(bool OptSize) const {
749 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
752 /// \brief Get maximum # of store operations permitted for llvm.memcpy
754 /// This function returns the maximum number of store operations permitted
755 /// to replace a call to llvm.memcpy. The value is set by the target at the
756 /// performance threshold for such a replacement. If OptSize is true,
757 /// return the limit for functions that have OptSize attribute.
758 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
759 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
762 /// \brief Get maximum # of store operations permitted for llvm.memmove
764 /// This function returns the maximum number of store operations permitted
765 /// to replace a call to llvm.memmove. The value is set by the target at the
766 /// performance threshold for such a replacement. If OptSize is true,
767 /// return the limit for functions that have OptSize attribute.
768 unsigned getMaxStoresPerMemmove(bool OptSize) const {
769 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
772 /// \brief Determine if the target supports unaligned memory accesses.
774 /// This function returns true if the target allows unaligned memory accesses
775 /// of the specified type in the given address space. If true, it also returns
776 /// whether the unaligned memory access is "fast" in the third argument by
777 /// reference. This is used, for example, in situations where an array
778 /// copy/move/set is converted to a sequence of store operations. Its use
779 /// helps to ensure that such replacements don't generate code that causes an
780 /// alignment error (trap) on the target machine.
781 virtual bool allowsUnalignedMemoryAccesses(EVT,
782 unsigned AddrSpace = 0,
783 bool * /*Fast*/ = nullptr) const {
787 /// Returns the target specific optimal type for load and store operations as
788 /// a result of memset, memcpy, and memmove lowering.
790 /// If DstAlign is zero, that means the destination alignment can
791 /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
792 /// a need to check it against alignment requirement, probably because the
793 /// source does not need to be loaded. If 'IsMemset' is true, that means it's
794 /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
795 /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
796 /// does not need to be loaded. It returns EVT::Other if the type should be
797 /// determined using generic target-independent logic.
798 virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
799 unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
802 bool /*MemcpyStrSrc*/,
803 MachineFunction &/*MF*/) const {
807 /// Returns true if it's safe to use load / store of the specified type to
808 /// expand memcpy / memset inline.
810 /// This is mostly true for all types except for some special cases. For
811 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
812 /// fstpl which also does type conversion. Note the specified type doesn't
813 /// have to be legal as the hook is used before type legalization.
814 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
816 /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
817 bool usesUnderscoreSetJmp() const {
818 return UseUnderscoreSetJmp;
821 /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
822 bool usesUnderscoreLongJmp() const {
823 return UseUnderscoreLongJmp;
826 /// Return whether the target can generate code for jump tables.
827 bool supportJumpTables() const {
828 return SupportJumpTables;
831 /// Return the integer threshold on the number of blocks above which jump
832 /// tables are used rather than an if sequence.
833 int getMinimumJumpTableEntries() const {
834 return MinimumJumpTableEntries;
837 /// If a physical register, this specifies the register that
838 /// llvm.stacksave/llvm.stackrestore should save and restore.
839 unsigned getStackPointerRegisterToSaveRestore() const {
840 return StackPointerRegisterToSaveRestore;
843 /// If a physical register, this returns the register that receives the
844 /// exception address on entry to a landing pad.
845 unsigned getExceptionPointerRegister() const {
846 return ExceptionPointerRegister;
849 /// If a physical register, this returns the register that receives the
850 /// exception typeid on entry to a landing pad.
851 unsigned getExceptionSelectorRegister() const {
852 return ExceptionSelectorRegister;
855 /// Returns the target's jmp_buf size in bytes (if never set, the default is 200).
857 unsigned getJumpBufSize() const {
861 /// Returns the target's jmp_buf alignment in bytes (if never set, the default is 0).
863 unsigned getJumpBufAlignment() const {
864 return JumpBufAlignment;
867 /// Return the minimum stack alignment of an argument.
868 unsigned getMinStackArgumentAlignment() const {
869 return MinStackArgumentAlignment;
872 /// Return the minimum function alignment.
873 unsigned getMinFunctionAlignment() const {
874 return MinFunctionAlignment;
877 /// Return the preferred function alignment.
878 unsigned getPrefFunctionAlignment() const {
879 return PrefFunctionAlignment;
882 /// Return the preferred loop alignment.
883 unsigned getPrefLoopAlignment() const {
884 return PrefLoopAlignment;
887 /// Return whether the DAG builder should automatically insert fences and
888 /// reduce ordering for atomics.
889 bool getInsertFencesForAtomic() const {
890 return InsertFencesForAtomic;
893 /// Return true if the target stores stack protector cookies at a fixed offset
894 /// in some non-standard address space, and populates the address space and
895 /// offset as appropriate.
896 virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
897 unsigned &/*Offset*/) const {
901 /// Returns the maximal possible offset which can be used for loads / stores from the global.
903 virtual unsigned getMaximalGlobalOffset() const {
907 /// Returns true if a cast between SrcAS and DestAS is a noop.
908 virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
912 //===--------------------------------------------------------------------===//
913 /// \name Helpers for TargetTransformInfo implementations
916 /// Get the ISD node that corresponds to the Instruction class opcode.
917 int InstructionOpcodeToISD(unsigned Opcode) const;
919 /// Estimate the cost of type-legalization and the legalized type.
920 std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;
924 //===--------------------------------------------------------------------===//
925 /// \name Helpers for load-linked/store-conditional atomic expansion.
928 /// Perform a load-linked operation on Addr, returning a "Value *" with the
929 /// corresponding pointee type. This may entail some non-trivial operations to
930 /// truncate or reconstruct types that will be illegal in the backend. See
931 /// ARMISelLowering for an example implementation.
932 virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
933 AtomicOrdering Ord) const {
934 llvm_unreachable("Load linked unimplemented on this target");
937 /// Perform a store-conditional operation to Addr. Return the status of the
938 /// store. This should be 0 if the store succeeded, non-zero otherwise.
939 virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
940 Value *Addr, AtomicOrdering Ord) const {
941 llvm_unreachable("Store conditional unimplemented on this target");
944 /// Return true if the given (atomic) instruction should be expanded by the
945 /// IR-level AtomicExpandLoadLinked pass into a loop involving
946 /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
947 /// same way as "atomic xchg" operations which ignore their output if needed.
948 virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
953 //===--------------------------------------------------------------------===//
954 // TargetLowering Configuration Methods - These methods should be invoked by
955 // the derived class constructor to configure this object for the target.
958 /// \brief Reset the operation actions based on target options.
959 virtual void resetOperationActions() {}
962 /// Specify how the target extends the result of integer and floating point
963 /// boolean values from i1 to a wider type. See getBooleanContents.
964 void setBooleanContents(BooleanContent Ty) {
965 BooleanContents = Ty;
966 BooleanFloatContents = Ty;
969 /// Specify how the target extends the result of integer and floating point
970 /// boolean values from i1 to a wider type. See getBooleanContents.
971 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
972 BooleanContents = IntTy;
973 BooleanFloatContents = FloatTy;
976 /// Specify how the target extends the result of a vector boolean value from a
977 /// vector of i1 to a wider type. See getBooleanContents.
978 void setBooleanVectorContents(BooleanContent Ty) {
979 BooleanVectorContents = Ty;
982 /// Specify the target scheduling preference.
983 void setSchedulingPreference(Sched::Preference Pref) {
984 SchedPreferenceInfo = Pref;
987 /// Indicate whether this target prefers to use _setjmp to implement
988 /// llvm.setjmp or the version without _. Defaults to false.
989 void setUseUnderscoreSetJmp(bool Val) {
990 UseUnderscoreSetJmp = Val;
993 /// Indicate whether this target prefers to use _longjmp to implement
994 /// llvm.longjmp or the version without _. Defaults to false.
995 void setUseUnderscoreLongJmp(bool Val) {
996 UseUnderscoreLongJmp = Val;
999 /// Indicate whether the target can generate code for jump tables.
1000 void setSupportJumpTables(bool Val) {
1001 SupportJumpTables = Val;
1004 /// Indicate the number of blocks required to generate a jump table rather than an if sequence.
1006 void setMinimumJumpTableEntries(int Val) {
1007 MinimumJumpTableEntries = Val;
1010 /// If set to a physical register, this specifies the register that
1011 /// llvm.stacksave/llvm.stackrestore should save and restore.
1012 void setStackPointerRegisterToSaveRestore(unsigned R) {
1013 StackPointerRegisterToSaveRestore = R;
1016 /// If set to a physical register, this sets the register that receives the
1017 /// exception address on entry to a landing pad.
1018 void setExceptionPointerRegister(unsigned R) {
1019 ExceptionPointerRegister = R;
1022 /// If set to a physical register, this sets the register that receives the
1023 /// exception typeid on entry to a landing pad.
1024 void setExceptionSelectorRegister(unsigned R) {
1025 ExceptionSelectorRegister = R;
1028 /// Tells the code generator not to expand operations into sequences that use
1029 /// the select operations if possible.
1030 void setSelectIsExpensive(bool isExpensive = true) {
1031 SelectIsExpensive = isExpensive;
1034 /// Tells the code generator that the target has multiple (allocatable)
1035 /// condition registers that can be used to store the results of comparisons
1036 /// for use by selects and conditional branches. With multiple condition
1037 /// registers, the code generator will not aggressively sink comparisons into
1038 /// the blocks of their users.
1039 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1040 HasMultipleConditionRegisters = hasManyRegs;
1043 /// Tells the code generator that the target has BitExtract instructions.
1044 /// The code generator will aggressively sink "shift"s into the blocks of
1045 /// their users if the users will generate "and" instructions which can be
1046 /// combined with "shift" to BitExtract instructions.
1047 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1048 HasExtractBitsInsn = hasExtractInsn;
1051 /// Tells the code generator not to expand a sequence of operations into
1052 /// separate sequences that increase the amount of flow control.
1053 void setJumpIsExpensive(bool isExpensive = true) {
1054 JumpIsExpensive = isExpensive;
1057 /// Tells the code generator whether integer divide is cheap on this target.
1058 /// If it is not, integer divides should, where possible, be replaced by an
1059 /// alternate sequence of instructions not containing an integer divide.
1060 void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
1062 /// Tells the code generator which bitwidths to bypass.
1063 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1064 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
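  // Example matching the BypassSlowDivWidths documentation below: tell the code
  // generator to try an 8-bit unsigned divide when a 32-bit divide's operands
  // are small enough, a configuration some x86 CPUs benefit from.
  //
  //   addBypassSlowDiv(32, 8);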
1067 /// Tells the code generator that it shouldn't generate srl/add/sra for a
1068 /// signed divide by a power of two, and should let the target handle it.
1069 void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
1071 /// Add the specified register class as an available regclass for the
1072 /// specified value type. This indicates the selector can handle values of
1073 /// that class natively.
1074 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
1075 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1076 AvailableRegClasses.push_back(std::make_pair(VT, RC));
1077 RegClassForVT[VT.SimpleTy] = RC;
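  // Typical use in a backend constructor (a sketch; "MyTarget::GPR32RegClass"
  // is a hypothetical register class name):
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);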
1080 /// Remove all register classes.
1081 void clearRegisterClasses() {
1082 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));
1084 AvailableRegClasses.clear();
1087 /// \brief Remove all operation actions.
1088 void clearOperationActions() {
1091 /// Return the largest legal super-reg register class of the register class
1092 /// for the specified type and its associated "cost".
1093 virtual std::pair<const TargetRegisterClass*, uint8_t>
1094 findRepresentativeClass(MVT VT) const;
1096 /// Once all of the register classes are added, this allows us to compute
1097 /// derived properties we expose.
1098 void computeRegisterProperties();
1100 /// Indicate that the specified operation does not work with the specified
1101 /// type and indicate what to do about it.
1102 void setOperationAction(unsigned Op, MVT VT,
1103 LegalizeAction Action) {
1104 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1105 OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
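  // Illustrative sketch: a derived TargetLowering constructor normally calls
  // this for every operation the target cannot handle natively. The particular
  // choices below are examples only, not any specific target's configuration.
  //
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand); // expand to a libcall/sequence
  //   setOperationAction(ISD::SELECT, MVT::f64, Custom); // lowered via LowerOperation
  //   setOperationAction(ISD::FSIN,   MVT::f32, Expand);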
1108 /// Indicate that the specified load with extension does not work with the
1109 /// specified type and indicate what to do about it.
1110 void setLoadExtAction(unsigned ExtType, MVT VT,
1111 LegalizeAction Action) {
1112 assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
1113 "Table isn't big enough!");
1114 LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
1117 /// Indicate that the specified truncating store does not work with the
1118 /// specified type and indicate what to do about it.
1119 void setTruncStoreAction(MVT ValVT, MVT MemVT,
1120 LegalizeAction Action) {
1121 assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
1122 "Table isn't big enough!");
1123 TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
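  // Sketch of common extension/truncation configuration (examples only):
  //
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); // sext-load of i1 via a wider type
  //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);   // no direct f64 -> f32 truncating store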
1126 /// Indicate that the specified indexed load does or does not work with the
1127 /// specified type and indicate what to do about it.
1129 /// NOTE: All indexed mode loads are initialized to Expand in
1130 /// TargetLowering.cpp
1131 void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1132 LegalizeAction Action) {
1133 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1134 (unsigned)Action < 0xf && "Table isn't big enough!");
1135 // Load actions are kept in the upper half.
1136 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1137 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1140 /// Indicate that the specified indexed store does or does not work with the
1141 /// specified type and indicate what to do about it.
1143 /// NOTE: All indexed mode stores are initialized to Expand in
1144 /// TargetLowering.cpp
1145 void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1146 LegalizeAction Action) {
1147 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1148 (unsigned)Action < 0xf && "Table isn't big enough!");
1149 // Store actions are kept in the lower half.
1150 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1151 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
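  // Sketch for a target with pre-increment addressing on 32-bit loads and
  // stores (examples only; most targets leave these at the Expand default):
  //
  //   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  //   setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);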
1154 /// Indicate that the specified condition code is or isn't supported on the
1155 /// target and indicate what to do about it.
1156 void setCondCodeAction(ISD::CondCode CC, MVT VT,
1157 LegalizeAction Action) {
1158 assert(VT < MVT::LAST_VALUETYPE &&
1159 (unsigned)CC < array_lengthof(CondCodeActions) &&
1160 "Table isn't big enough!");
1161 /// The lower 4 bits of the SimpleTy select the Nth 2-bit field within the
1162 /// 32-bit value, and the remaining upper bits index into the second dimension
1163 /// of the array to select which 32-bit value to use.
1164 uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
1165 CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
1166 CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
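  // Example (illustrative only): a target with no native unordered floating
  // point comparison can ask for it to be expanded into other comparisons.
  //
  //   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);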
1169 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1170 /// to trying a larger integer/fp until it can find one that works. If that
1171 /// default is insufficient, this method can be used by the target to override the default.
1173 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1174 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1177 /// Targets should invoke this method for each target independent node that
1178 /// they want to provide a custom DAG combiner for by implementing the
1179 /// PerformDAGCombine virtual method.
1180 void setTargetDAGCombine(ISD::NodeType NT) {
1181 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1182 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
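  // Sketch: a backend that wants to combine target-independent ADD nodes would
  // register interest here and then handle them in its PerformDAGCombine
  // override (see the comment above).
  //
  //   setTargetDAGCombine(ISD::ADD);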
1185 /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1186 void setJumpBufSize(unsigned Size) {
1190 /// Set the target's required jmp_buf buffer alignment (in bytes); default is 0.
1192 void setJumpBufAlignment(unsigned Align) {
1193 JumpBufAlignment = Align;
1196 /// Set the target's minimum function alignment (in log2(bytes))
1197 void setMinFunctionAlignment(unsigned Align) {
1198 MinFunctionAlignment = Align;
1201 /// Set the target's preferred function alignment. This should be set if
1202 /// there is a performance benefit to higher-than-minimum alignment (in log2(bytes)).
1204 void setPrefFunctionAlignment(unsigned Align) {
1205 PrefFunctionAlignment = Align;
1208 /// Set the target's preferred loop alignment. Default alignment is zero, it
1209 /// means the target does not care about loop alignment. The alignment is
1210 /// specified in log2(bytes).
1211 void setPrefLoopAlignment(unsigned Align) {
1212 PrefLoopAlignment = Align;
1215 /// Set the minimum stack alignment of an argument (in log2(bytes)).
1216 void setMinStackArgumentAlignment(unsigned Align) {
1217 MinStackArgumentAlignment = Align;
1220 /// Set if the DAG builder should automatically insert fences and reduce the
1221 /// order of atomic memory operations to Monotonic.
1222 void setInsertFencesForAtomic(bool fence) {
1223 InsertFencesForAtomic = fence;
1227 //===--------------------------------------------------------------------===//
1228 // Addressing mode description hooks (used by LSR etc).
1231 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1232 /// instructions reading the address. This allows as much computation as
1233 /// possible to be done in the address mode for that operand. This hook lets
1234 /// targets also pass back when this should be done on intrinsics which read/write memory.
1236 virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1237 SmallVectorImpl<Value*> &/*Ops*/,
1238 Type *&/*AccessTy*/) const {
1242 /// This represents an addressing mode of:
1243 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1244 /// If BaseGV is null, there is no BaseGV.
1245 /// If BaseOffs is zero, there is no base offset.
1246 /// If HasBaseReg is false, there is no base register.
1247 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with no scale.
1250 GlobalValue *BaseGV;
1254 AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1257 /// Return true if the addressing mode represented by AM is legal for this
1258 /// target, for a load/store of the specified type.
1260 /// The type may be VoidTy, in which case only return true if the addressing
1261 /// mode is legal for a load/store of any legal type. TODO: Handle
1262 /// pre/postinc as well.
1263 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
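  // Query sketch: check whether a simple "base register + 4" address is legal
  // for an access of type Ty (Ty is assumed to come from the surrounding code).
  //
  //   AddrMode AM;
  //   AM.HasBaseReg = true;
  //   AM.BaseOffs   = 4;
  //   bool Legal = isLegalAddressingMode(AM, Ty);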
1265 /// \brief Return the cost of the scaling factor used in the addressing mode
1266 /// represented by AM for this target, for a load/store of the specified type.
1268 /// If the AM is supported, the return value must be >= 0.
1269 /// If the AM is not supported, it returns a negative value.
1270 /// TODO: Handle pre/postinc as well.
1271 virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
1272 // Default: assume that any scaling factor used in a legal AM is free.
1273 if (isLegalAddressingMode(AM, Ty)) return 0;
1277 /// Return true if the specified immediate is a legal icmp immediate, that is,
1278 /// the target has icmp instructions which can compare a register against the
1279 /// immediate without having to materialize the immediate into a register.
1280 virtual bool isLegalICmpImmediate(int64_t) const {
1284 /// Return true if the specified immediate is a legal add immediate, that is, the
1285 /// target has add instructions which can add a register with the immediate
1286 /// without having to materialize the immediate into a register.
1287 virtual bool isLegalAddImmediate(int64_t) const {
1291 /// Return true if it's significantly cheaper to shift a vector by a uniform
1292 /// scalar than by an amount which will vary across each lane. On x86, for
1293 /// example, there is a "psllw" instruction for the former case, but no simple
1294 /// instruction for a general "a << b" operation on vectors.
1295 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1299 /// Return true if it's free to truncate a value of type Ty1 to type
1300 /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
1301 /// by referencing its sub-register AX.
1302 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1306 /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
1307 /// whether a call is in tail position. Typically this means that both results
1308 /// would be assigned to the same register or stack slot, but it could mean
1309 /// the target performs adequate checks of its own before proceeding with the tail call.
1311 virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
1315 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1319 /// Return true if any actual instruction that defines a value of type Ty1
1320 /// implicitly zero-extends the value to Ty2 in the result register.
1322 /// This does not necessarily include registers defined in unknown ways, such
1323 /// as incoming arguments, or copies from unknown virtual registers. Also, if
1324 /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
1325 /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
1326 /// values implicitly zero-extend the result out to 64 bits.
1327 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1331 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1335 /// Return true if the target supplies and combines to a paired load
1336 /// two loaded values of type LoadedType next to each other in memory.
1337 /// RequiredAlignment gives the minimal alignment constraints that must be met
1338 /// to be able to select this paired load.
1340 /// This information is *not* used to generate actual paired loads, but it is
1341 /// used to generate a sequence of loads that is easier to combine into a paired load.
1343 /// For instance, something like this:
1344 /// a = load i64* addr
1345 /// b = trunc i64 a to i32
1346 /// c = lshr i64 a, 32
1347 /// d = trunc i64 c to i32
1348 /// will be optimized into:
1349 /// b = load i32* addr1
1350 /// d = load i32* addr2
1351 /// Where addr1 = addr2 +/- sizeof(i32).
1353 /// In other words, unless the target performs a post-isel load combining,
1354 /// this information should not be provided because it will generate more loads.
1356 virtual bool hasPairedLoad(Type * /*LoadedType*/,
1357 unsigned & /*RequiredAlignment*/) const {
1361 virtual bool hasPairedLoad(EVT /*LoadedType*/,
1362 unsigned & /*RequiredAlignment*/) const {
1366 /// Return true if zero-extending the specific node Val to type VT2 is free
1367 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1368 /// because it's folded such as X86 zero-extending loads).
1369 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1370 return isZExtFree(Val.getValueType(), VT2);
1373 /// Return true if an fneg operation is free to the point where it is never
1374 /// worthwhile to replace it with a bitwise operation.
1375 virtual bool isFNegFree(EVT VT) const {
1376 assert(VT.isFloatingPoint());
1380 /// Return true if an fabs operation is free to the point where it is never
1381 /// worthwhile to replace it with a bitwise operation.
1382 virtual bool isFAbsFree(EVT VT) const {
1383 assert(VT.isFloatingPoint());
1387 /// Return true if an FMA operation is faster than a pair of fmul and fadd
1388 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1389 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1391 /// NOTE: This may be called before legalization on types for which FMAs are
1392 /// not legal, but should return true if those types will eventually legalize
1393 /// to types that support FMAs. After legalization, it will only be called on
1394 /// types that support FMAs (via Legal or Custom actions)
1395 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1399 /// Return true if it's profitable to narrow operations of type VT1 to
1400 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from i32 to i16.
1402 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1406 /// \brief Return true if it is beneficial to convert a load of a constant to
1407 /// just the constant itself.
1408 /// On some targets it might be more efficient to use a combination of
1409 /// arithmetic instructions to materialize the constant instead of loading it
1410 /// from a constant pool.
1411 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1415 //===--------------------------------------------------------------------===//
1416 // Runtime Library hooks
1419 /// Rename the default libcall routine name for the specified libcall.
1420 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1421 LibcallRoutineNames[Call] = Name;
1424 /// Get the libcall routine name for the specified libcall.
1425 const char *getLibcallName(RTLIB::Libcall Call) const {
1426 return LibcallRoutineNames[Call];
1429 /// Override the default CondCode to be used to test the result of the
1430 /// comparison libcall against zero.
1431 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1432 CmpLibcallCCs[Call] = CC;
1435 /// Get the CondCode that's to be used to test the result of the comparison
1436 /// libcall against zero.
1437 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1438 return CmpLibcallCCs[Call];
1441 /// Set the CallingConv that should be used for the specified libcall.
1442 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1443 LibcallCallingConvs[Call] = CC;
1446 /// Get the CallingConv that should be used for the specified libcall.
1447 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1448 return LibcallCallingConvs[Call];
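  // Configuration sketch (illustrative only, not any particular target's
  // setup): rename a libcall, pick its calling convention, and describe how its
  // result is tested against zero. "__my_srem64" is a hypothetical routine name.
  //
  //   setLibcallName(RTLIB::SREM_I64, "__my_srem64");
  //   setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE); // nonzero result means "equal" on some ABIs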
1452 const TargetMachine &TM;
1453 const DataLayout *DL;
1454 const TargetLoweringObjectFile &TLOF;
1456 /// True if this is a little endian target.
1457 bool IsLittleEndian;
1459 /// Tells the code generator not to expand operations into sequences that use
1460 /// the select operations if possible.
1461 bool SelectIsExpensive;
1463 /// Tells the code generator that the target has multiple (allocatable)
1464 /// condition registers that can be used to store the results of comparisons
1465 /// for use by selects and conditional branches. With multiple condition
1466 /// registers, the code generator will not aggressively sink comparisons into
1467 /// the blocks of their users.
1468 bool HasMultipleConditionRegisters;
1470 /// Tells the code generator that the target has BitExtract instructions.
1471 /// The code generator will aggressively sink "shift"s into the blocks of
1472 /// their users if the users will generate "and" instructions which can be
1473 /// combined with "shift" to BitExtract instructions.
1474 bool HasExtractBitsInsn;
1476 /// Tells the code generator not to expand integer divides by constants into a
1477 /// sequence of muls, adds, and shifts. This is a hack until a real cost
1478 /// model is in place. If we ever optimize for size, this will be set to true
1479 /// unconditionally.
1482 /// Tells the code generator to bypass slow divide or remainder
1483 /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
1484 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1485 /// div/rem when the operands are positive and less than 256.
1486 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
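/// For illustration only: a target whose 32-bit divider is slow but whose
/// 8-bit divider is fast would typically populate this map from its
/// constructor, assuming the addBypassSlowDiv() setter provided by this
/// class:
/// \code
///   addBypassSlowDiv(32, 8); // try an 8-bit div/rem before a full 32-bit one
/// \endcode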
1488 /// Tells the code generator that it shouldn't generate srl/add/sra for a
1489 /// signed divide by power of two, and let the target handle it.
1490 bool Pow2DivIsCheap;
1492 /// Tells the code generator that it shouldn't generate extra flow control
1493 /// instructions and should attempt to combine flow control instructions via predication.
1495 bool JumpIsExpensive;
1497 /// This target prefers to use _setjmp to implement llvm.setjmp.
1499 /// Defaults to false.
1500 bool UseUnderscoreSetJmp;
1502 /// This target prefers to use _longjmp to implement llvm.longjmp.
1504 /// Defaults to false.
1505 bool UseUnderscoreLongJmp;
1507 /// Whether the target can generate code for jumptables. If it's not true,
1508 /// then each jumptable must be lowered into if-then-else's.
1509 bool SupportJumpTables;
1511 /// The minimum number of entries required before a jump table is used rather than a sequence of branches.
1512 int MinimumJumpTableEntries;
1514 /// Information about the contents of the high-bits in boolean values held in
1515 /// a type wider than i1. See getBooleanContents.
1516 BooleanContent BooleanContents;
1518 /// Information about the contents of the high-bits in boolean values held in
1519 /// a type wider than i1. See getBooleanContents.
1520 BooleanContent BooleanFloatContents;
1522 /// Information about the contents of the high-bits in boolean vector values
1523 /// when the element type is wider than i1. See getBooleanContents.
1524 BooleanContent BooleanVectorContents;
1526 /// The target scheduling preference: shortest possible total cycles or lowest register usage.
1528 Sched::Preference SchedPreferenceInfo;
1530 /// The size, in bytes, of the target's jmp_buf buffers
1531 unsigned JumpBufSize;
1533 /// The alignment, in bytes, of the target's jmp_buf buffers
1534 unsigned JumpBufAlignment;
1536 /// The minimum alignment that any argument on the stack needs to have.
1537 unsigned MinStackArgumentAlignment;
1539 /// The minimum function alignment (used when optimizing for size, and to
1540 /// prevent explicitly provided alignment from leading to incorrect code).
1541 unsigned MinFunctionAlignment;
1543 /// The preferred function alignment (used when alignment unspecified and
1544 /// optimizing for speed).
1545 unsigned PrefFunctionAlignment;
1547 /// The preferred loop alignment.
1548 unsigned PrefLoopAlignment;
1550 /// Whether the DAG builder should automatically insert fences and reduce
1551 /// ordering for atomics. (This will be set for most architectures with
1552 /// weak memory ordering.)
1553 bool InsertFencesForAtomic;
1555 /// If set to a physical register, this specifies the register that
1556 /// llvm.stacksave/llvm.stackrestore should save and restore.
1557 unsigned StackPointerRegisterToSaveRestore;
1559 /// If set to a physical register, this specifies the register that receives
1560 /// the exception address on entry to a landing pad.
1561 unsigned ExceptionPointerRegister;
1563 /// If set to a physical register, this specifies the register that receives
1564 /// the exception typeid on entry to a landing pad.
1565 unsigned ExceptionSelectorRegister;
1567 /// This indicates the default register class to use for each ValueType the
1568 /// target supports natively.
1569 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1570 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1571 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1573 /// This indicates the "representative" register class to use for each
1574 /// ValueType the target supports natively. This information is used by the
1575 /// scheduler to track register pressure. By default, the representative
1576 /// register class is the largest legal super-reg register class of the
1577 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1578 /// representative class would be GR32.
1579 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1581 /// This indicates the "cost" of the "representative" register class for each
1582 /// ValueType. The cost is used by the scheduler to approximate register pressure.
1584 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1586 /// For any value types we are promoting or expanding, this contains the value
1587 /// type that we are changing to. For Expanded types, this contains one step
1588 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1589 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
1590 /// the same type (e.g. i32 -> i32).
1591 MVT TransformToType[MVT::LAST_VALUETYPE];
1593 /// For each operation and each value type, keep a LegalizeAction that
1594 /// indicates how instruction selection should deal with the operation. Most
1595 /// operations are Legal (aka, supported natively by the target), but
1596 /// operations that are not should be described. Note that operations on
1597 /// non-legal value types are not described here.
1598 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1600 /// For each load extension type and each value type, keep a LegalizeAction
1601 /// that indicates how instruction selection should deal with a load of a
1602 /// specific value type and extension type.
1603 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1605 /// For each value type pair keep a LegalizeAction that indicates whether a
1606 /// truncating store of a specific value type and truncating type is legal.
1607 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1609 /// For each indexed mode and each value type, keep a pair of LegalizeAction
1610 /// that indicates how instruction selection should deal with the load / store.
1613 /// The first dimension is the value_type for the reference. The second
1614 /// dimension represents the various modes for load / store.
1615 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1617 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1618 /// indicates how instruction selection should deal with the condition code.
1620 /// Because each CC action takes up 2 bits, we need to have the array size be
1621 /// large enough to fit all of the value types. This can be done by rounding
1622 /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
1623 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
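/// Worked example of the packing (illustrative only): with 2 bits per
/// action, each uint32_t word holds the actions for 16 value types, so the
/// action for a value type with SimpleTy index 37 lives in word 37 / 16 = 2
/// at bit offset (37 % 16) * 2 = 10 within that word.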
1625 ValueTypeActionImpl ValueTypeActions;
1629 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const {
1630 // If this is a simple type, use the ComputeRegisterProp mechanism.
1631 if (VT.isSimple()) {
1632 MVT SVT = VT.getSimpleVT();
1633 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
1634 MVT NVT = TransformToType[SVT.SimpleTy];
1635 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1639 assert((LA == TypeLegal || ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
1640 && "Promote may not follow Expand or Promote");
1642 if (LA == TypeSplitVector)
1643 return LegalizeKind(LA, EVT::getVectorVT(Context,
1644 SVT.getVectorElementType(),
1645 SVT.getVectorNumElements()/2));
1646 if (LA == TypeScalarizeVector)
1647 return LegalizeKind(LA, SVT.getVectorElementType());
1648 return LegalizeKind(LA, NVT);
1651 // Handle Extended Scalar Types.
1652 if (!VT.isVector()) {
1653 assert(VT.isInteger() && "Float types must be simple");
1654 unsigned BitSize = VT.getSizeInBits();
1655 // First promote to a power-of-two size, then expand if necessary.
1656 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1657 EVT NVT = VT.getRoundIntegerType(Context);
1658 assert(NVT != VT && "Unable to round integer VT");
1659 LegalizeKind NextStep = getTypeConversion(Context, NVT);
1660 // Avoid multi-step promotion.
1661 if (NextStep.first == TypePromoteInteger) return NextStep;
1662 // Return rounded integer type.
1663 return LegalizeKind(TypePromoteInteger, NVT);
1666 return LegalizeKind(TypeExpandInteger,
1667 EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
1670 // Handle vector types.
1671 unsigned NumElts = VT.getVectorNumElements();
1672 EVT EltVT = VT.getVectorElementType();
1674 // Vectors with only one element are always scalarized.
1676 if (NumElts == 1) return LegalizeKind(TypeScalarizeVector, EltVT);
1678 // Try to widen vector elements until the element type is a power of two and
1679 // promote it to a legal type later on, for example:
1680 // <3 x i8> -> <4 x i8> -> <4 x i32>
1681 if (EltVT.isInteger()) {
1682 // Vectors with a number of elements that is not a power of two are always
1683 // widened, for example <3 x i8> -> <4 x i8>.
1684 if (!VT.isPow2VectorType()) {
1685 NumElts = (unsigned)NextPowerOf2(NumElts);
1686 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1687 return LegalizeKind(TypeWidenVector, NVT);
1690 // Examine the element type.
1691 LegalizeKind LK = getTypeConversion(Context, EltVT);
1693 // If type is to be expanded, split the vector.
1694 // <4 x i140> -> <2 x i140>
1695 if (LK.first == TypeExpandInteger)
1696 return LegalizeKind(TypeSplitVector,
1697 EVT::getVectorVT(Context, EltVT, NumElts / 2));
1699 // Promote the integer element types until a legal vector type is found
1700 // or until the element integer type is too big. If a legal type was not
1701 // found, fall back to the usual mechanism of widening/splitting the vector.
1703 EVT OldEltVT = EltVT;
1705 // Increase the bitwidth of the element to the next pow-of-two
1706 // (which is greater than 8 bits).
1707 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
1708 ).getRoundIntegerType(Context);
1710 // Stop trying when getting a non-simple element type.
1711 // Note that vector elements may be greater than legal vector element
1712 // types. Example: X86 XMM registers hold 64-bit elements on 32-bit systems.
1714 if (!EltVT.isSimple()) break;
1716 // Build a new vector type and check if it is legal.
1717 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1718 // Found a legal promoted vector type.
1719 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1720 return LegalizeKind(TypePromoteInteger,
1721 EVT::getVectorVT(Context, EltVT, NumElts));
1724 // Reset the type to the unexpanded type if we did not find a legal vector
1725 // type with a promoted vector element type.
1729 // Try to widen the vector until a legal type is found.
1730 // If there is no wider legal type, split the vector.
1732 // Round up to the next power of 2.
1733 NumElts = (unsigned)NextPowerOf2(NumElts);
1735 // If there is no simple vector type with this many elements then there
1736 // cannot be a larger legal vector type. Note that this assumes that
1737 // there are no skipped intermediate vector types in the simple types.
1738 if (!EltVT.isSimple()) break;
1739 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1740 if (LargerVector == MVT()) break;
1742 // If this type is legal then widen the vector.
1743 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1744 return LegalizeKind(TypeWidenVector, LargerVector);
1747 // Widen odd vectors to next power of two.
1748 if (!VT.isPow2VectorType()) {
1749 EVT NVT = VT.getPow2VectorType(Context);
1750 return LegalizeKind(TypeWidenVector, NVT);
1753 // Vectors with illegal element types are expanded.
1754 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1755 return LegalizeKind(TypeSplitVector, NVT);
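// Illustrative trace (the concrete results depend on which types the target
// has registered as legal): asking for the conversion of <3 x i8> first takes
// the !isPow2VectorType() path above and yields (TypeWidenVector, <4 x i8>);
// a later query on <4 x i8> may then promote the element type (for example
// toward <4 x i32>) if the promotion loop finds such a legal vector type, and
// otherwise falls through to the widening/splitting logic.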
1759 std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1761 /// Targets can specify ISD nodes that they would like PerformDAGCombine
1762 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this array.
1765 unsigned char TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1767 /// For operations that must be promoted to a specific type, this holds the
1768 /// destination type. This map should be sparse, so don't hold it as an array.
1771 /// Targets add entries to this map with AddPromotedToType(..), clients access
1772 /// this with getTypeToPromoteTo(..).
1773 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType> PromoteToType;
1776 /// Stores the name of each libcall.
1777 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1779 /// The ISD::CondCode that should be used to test the result of each of the
1780 /// comparison libcall against zero.
1781 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1783 /// Stores the CallingConv that should be used for each libcall.
1784 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1787 /// \brief Specify maximum number of store instructions per memset call.
1789 /// When lowering \@llvm.memset this field specifies the maximum number of
1790 /// store operations that may be substituted for the call to memset. Targets
1791 /// must set this value based on the cost threshold for that target. Targets
1792 /// should assume that the memset will be done using as many of the largest
1793 /// store operations first, followed by smaller ones, if necessary, per
1794 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1795 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1796 /// store. This only applies to setting a constant array of a constant size.
1797 unsigned MaxStoresPerMemset;
1799 /// Maximum number of store operations that may be substituted for the call
1800 /// to memset, used for functions with OptSize attribute.
1801 unsigned MaxStoresPerMemsetOptSize;
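/// Illustrative only (the numbers below are made up, not recommendations):
/// a target would normally just assign these thresholds in its constructor:
/// \code
///   MaxStoresPerMemset = 16;        // default-speed functions
///   MaxStoresPerMemsetOptSize = 8;  // functions marked optsize
/// \endcode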
1803 /// \brief Specify maximum number of store instructions per memcpy call.
1805 /// When lowering \@llvm.memcpy this field specifies the maximum number of
1806 /// store operations that may be substituted for a call to memcpy. Targets
1807 /// must set this value based on the cost threshold for that target. Targets
1808 /// should assume that the memcpy will be done using as many of the largest
1809 /// store operations first, followed by smaller ones, if necessary, per
1810 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1811 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
1812 /// and one 1-byte store. This only applies to copying a constant array of constant size.
1814 unsigned MaxStoresPerMemcpy;
1816 /// Maximum number of store operations that may be substituted for a call to
1817 /// memcpy, used for functions with OptSize attribute.
1818 unsigned MaxStoresPerMemcpyOptSize;
1820 /// \brief Specify maximum number of store instructions per memmove call.
1822 /// When lowering \@llvm.memmove this field specifies the maximum number of
1823 /// store instructions that may be substituted for a call to memmove. Targets
1824 /// must set this value based on the cost threshold for that target. Targets
1825 /// should assume that the memmove will be done using as many of the largest
1826 /// store operations first, followed by smaller ones, if necessary, per
1827 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1828 /// with 8-bit alignment would result in nine 1-byte stores. This only
1829 /// applies to copying a constant array of constant size.
1830 unsigned MaxStoresPerMemmove;
1832 /// Maximum number of store instructions that may be substituted for a call to
1833 /// memmove, used for functions with OptSize attribute.
1834 unsigned MaxStoresPerMemmoveOptSize;
1836 /// Tells the code generator that select is more expensive than a branch if
1837 /// the branch is usually predicted right.
1838 bool PredictableSelectIsExpensive;
1840 /// Indicates whether the target supports folding a mask of a single bit,
1841 /// a compare, and a branch into a single instruction.
1842 bool MaskAndBranchFoldingIsLegal;
1845 /// Return true if the value types that can be represented by the specified
1846 /// register class are all legal.
1847 bool isLegalRC(const TargetRegisterClass *RC) const;
1849 /// Replace/modify any TargetFrameIndex operands with a target-dependent
1850 /// sequence of memory operands that is recognized by PrologEpilogInserter.
1851 MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
1854 /// This class defines information used to lower LLVM code to legal SelectionDAG
1855 /// operators that the target instruction selector can accept natively.
1857 /// This class also defines callbacks that targets must implement to lower
1858 /// target-specific constructs to SelectionDAG operators.
1859 class TargetLowering : public TargetLoweringBase {
1860 TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
1861 void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
1864 /// NOTE: The constructor takes ownership of TLOF.
1865 explicit TargetLowering(const TargetMachine &TM,
1866 const TargetLoweringObjectFile *TLOF);
1868 /// Returns true, and sets the base pointer, offset pointer, and addressing
1869 /// mode by reference, if the node's address can be legally represented as a
1870 /// pre-indexed load / store address.
1871 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
1872 SDValue &/*Offset*/,
1873 ISD::MemIndexedMode &/*AM*/,
1874 SelectionDAG &/*DAG*/) const {
1878 /// Returns true, and sets the base pointer, offset pointer, and addressing
1879 /// mode by reference, if this node can be combined with a load / store to
1880 /// form a post-indexed load / store.
1881 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
1883 SDValue &/*Offset*/,
1884 ISD::MemIndexedMode &/*AM*/,
1885 SelectionDAG &/*DAG*/) const {
1889 /// Return the entry encoding for a jump table in the current function. The
1890 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
1891 virtual unsigned getJumpTableEncoding() const;
1893 virtual const MCExpr *
1894 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
1895 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
1896 MCContext &/*Ctx*/) const {
1897 llvm_unreachable("Need to implement this hook if target has custom JTIs");
1900 /// Returns relocation base for the given PIC jumptable.
1901 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
1902 SelectionDAG &DAG) const;
1904 /// This returns the relocation base for the given PIC jumptable, the same as
1905 /// getPICJumpTableRelocBase, but as an MCExpr.
1906 virtual const MCExpr *
1907 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
1908 unsigned JTI, MCContext &Ctx) const;
1910 /// Return true if folding a constant offset with the given GlobalAddress is
1911 /// legal. It is frequently not legal in PIC relocation models.
1912 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
1914 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
1915 SDValue &Chain) const;
1917 void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
1918 SDValue &NewLHS, SDValue &NewRHS,
1919 ISD::CondCode &CCCode, SDLoc DL) const;
1921 /// Returns a pair of (return value, chain).
1922 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
1923 EVT RetVT, const SDValue *Ops,
1924 unsigned NumOps, bool isSigned,
1925 SDLoc dl, bool doesNotReturn = false,
1926 bool isReturnValueUsed = true) const;
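/// A minimal usage sketch (the operands, types, and chosen libcall are
/// assumptions for illustration, not requirements): a target's custom
/// lowering might emit a soft-float addition as
/// \code
///   SDValue Ops[2] = { LHS, RHS };                  // assumed f64 values
///   std::pair<SDValue, SDValue> CallInfo =
///       makeLibCall(DAG, RTLIB::ADD_F64, MVT::f64, Ops, 2,
///                   /*isSigned=*/false, dl);
///   SDValue Result = CallInfo.first;                // the call's value
///   SDValue OutChain = CallInfo.second;             // the output chain
/// \endcode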
1928 //===--------------------------------------------------------------------===//
1929 // TargetLowering Optimization Methods
1932 /// A convenience struct that encapsulates a DAG, and two SDValues for
1933 /// returning information from TargetLowering to its clients that want to combine.
1935 struct TargetLoweringOpt {
1942 explicit TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO) :
1944 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
1946 bool LegalTypes() const { return LegalTys; }
1947 bool LegalOperations() const { return LegalOps; }
1949 bool CombineTo(SDValue O, SDValue N) {
1955 /// Check to see if the specified operand of the specified instruction is a
1956 /// constant integer. If so, check to see if there are any bits set in the
1957 /// constant that are not demanded. If so, shrink the constant and return true.
1959 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
1961 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
1962 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
1963 /// generalized for targets with other types of implicit widening casts.
1964 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
1968 /// Look at Op. At this point, we know that only the DemandedMask bits of the
1969 /// result of Op are ever used downstream. If we can use this information to
1970 /// simplify Op, create a new simplified DAG node and return true, returning
1971 /// the original and new nodes in Old and New. Otherwise, analyze the
1972 /// expression and return a mask of KnownOne and KnownZero bits for the
1973 /// expression (used to simplify the caller). The KnownZero/One bits may only
1974 /// be accurate for those bits in the DemandedMask.
1975 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
1976 APInt &KnownZero, APInt &KnownOne,
1977 TargetLoweringOpt &TLO, unsigned Depth = 0) const;
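/// Sketch of typical usage from a target's PerformDAGCombine (the demanded
/// width and the commit policy here are assumptions, not requirements):
/// \code
///   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
///                         !DCI.isBeforeLegalizeOps());
///   APInt KnownZero, KnownOne;
///   APInt Demanded = APInt::getLowBitsSet(32, 8);   // only low 8 bits used
///   if (SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
///     DCI.CommitTargetLoweringOpt(TLO);
/// \endcode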
1979 /// Determine which of the bits specified in Mask are known to be either zero
1980 /// or one and return them in the KnownZero/KnownOne bitsets.
1981 virtual void computeKnownBitsForTargetNode(const SDValue Op,
1984 const SelectionDAG &DAG,
1985 unsigned Depth = 0) const;
1987 /// This method can be implemented by targets that want to expose additional
1988 /// information about sign bits to the DAG Combiner.
1989 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
1990 const SelectionDAG &DAG,
1991 unsigned Depth = 0) const;
1993 struct DAGCombinerInfo {
1994 void *DC; // The DAG Combiner object.
1996 bool CalledByLegalizer;
2000 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2001 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2003 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2004 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2005 bool isAfterLegalizeVectorOps() const {
2006 return Level == AfterLegalizeDAG;
2008 CombineLevel getDAGCombineLevel() { return Level; }
2009 bool isCalledByLegalizer() const { return CalledByLegalizer; }
2011 void AddToWorklist(SDNode *N);
2012 void RemoveFromWorklist(SDNode *N);
2013 SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
2015 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2016 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2018 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2021 /// Return true if N is a constant or constant vector equal to the true value
2022 /// from getBooleanContents().
2023 bool isConstTrueVal(const SDNode *N) const;
2025 /// Return true if N is a constant or constant vector equal to the false value
2026 /// from getBooleanContents().
2027 bool isConstFalseVal(const SDNode *N) const;
2029 /// Try to simplify a setcc built with the specified operands and cc. If it is
2030 /// unable to simplify it, return a null SDValue.
2031 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
2032 ISD::CondCode Cond, bool foldBooleans,
2033 DAGCombinerInfo &DCI, SDLoc dl) const;
2035 /// Returns true (and the GlobalValue and the offset) if the node is a
2036 /// GlobalAddress + offset.
2038 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2040 /// This method will be invoked for all target nodes and for any
2041 /// target-independent nodes that the target has registered, via setTargetDAGCombine(), to have this callback invoked for.
2044 /// The semantics are as follows:
2046 /// SDValue.Val == 0 - No change was made
2047 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2048 /// otherwise - N should be replaced by the returned Operand.
2050 /// In addition, methods provided by DAGCombinerInfo may be used to perform
2051 /// more complex transformations.
2053 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
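/// A minimal override skeleton, assuming a hypothetical MyTargetLowering
/// (the opcode handled below is only an example):
/// \code
///   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
///                                               DAGCombinerInfo &DCI) const {
///     switch (N->getOpcode()) {
///     default: break;
///     case ISD::ADD:
///       // Target-specific ADD combines would go here.
///       break;
///     }
///     return SDValue(); // no change was made
///   }
/// \endcode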
2055 /// Return true if it is profitable to move a following shift through this
2056 /// node, adjusting any immediate operands as necessary to preserve semantics.
2057 /// This transformation may not be desirable if it disrupts a particularly
2058 /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2059 /// By default, it returns true.
2060 virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
2064 /// Return true if the target has native support for the specified value type
2065 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2066 /// i16 is legal, but undesirable since i16 instruction encodings are longer
2067 /// and some i16 instructions are slow.
2068 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2069 // By default, assume all legal types are desirable.
2070 return isTypeLegal(VT);
2073 /// Return true if it is profitable for dag combiner to transform a floating
2074 /// point op of the specified opcode to an equivalent op of an integer
2075 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2076 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2081 /// This method queries the target whether it is beneficial for the dag combiner to
2082 /// promote the specified node. If true, it should return the desired
2083 /// promotion type by reference.
2084 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2088 //===--------------------------------------------------------------------===//
2089 // Lowering methods - These methods must be implemented by targets so that
2090 // the SelectionDAGBuilder code knows how to lower these.
2093 /// This hook must be implemented to lower the incoming (formal) arguments,
2094 /// described by the Ins array, into the specified DAG. The implementation
2095 /// should fill in the InVals array with legal-type argument values, and
2096 /// return the resulting token chain value.
2099 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2101 const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
2102 SDLoc /*dl*/, SelectionDAG &/*DAG*/,
2103 SmallVectorImpl<SDValue> &/*InVals*/) const {
2104 llvm_unreachable("Not Implemented");
2107 struct ArgListEntry {
2116 bool isInAlloca : 1;
2117 bool isReturned : 1;
2120 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
2121 isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
2122 isReturned(false), Alignment(0) { }
2124 void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2126 typedef std::vector<ArgListEntry> ArgListTy;
2128 /// This structure contains all information that is necessary for lowering
2129 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2130 /// needs to lower a call, and targets will see this struct in their LowerCall implementation.
2132 struct CallLoweringInfo {
2139 bool DoesNotReturn : 1;
2140 bool IsReturnValueUsed : 1;
2142 // IsTailCall should be modified by implementations of
2143 // TargetLowering::LowerCall that perform tail call conversions.
2146 unsigned NumFixedArgs;
2147 CallingConv::ID CallConv;
2152 ImmutableCallSite *CS;
2153 SmallVector<ISD::OutputArg, 32> Outs;
2154 SmallVector<SDValue, 32> OutVals;
2155 SmallVector<ISD::InputArg, 32> Ins;
2157 CallLoweringInfo(SelectionDAG &DAG)
2158 : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2159 IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
2160 IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
2161 DAG(DAG), CS(nullptr) {}
2163 CallLoweringInfo &setDebugLoc(SDLoc dl) {
2168 CallLoweringInfo &setChain(SDValue InChain) {
2173 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2174 SDValue Target, ArgListTy &&ArgsList,
2175 unsigned FixedArgs = -1) {
2180 (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
2181 Args = std::move(ArgsList);
2185 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2186 SDValue Target, ArgListTy &&ArgsList,
2187 ImmutableCallSite &Call) {
2190 IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2191 DoesNotReturn = Call.doesNotReturn();
2192 IsVarArg = FTy->isVarArg();
2193 IsReturnValueUsed = !Call.getInstruction()->use_empty();
2194 RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2195 RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2199 CallConv = Call.getCallingConv();
2200 NumFixedArgs = FTy->getNumParams();
2201 Args = std::move(ArgsList);
2208 CallLoweringInfo &setInRegister(bool Value = true) {
2213 CallLoweringInfo &setNoReturn(bool Value = true) {
2214 DoesNotReturn = Value;
2218 CallLoweringInfo &setVarArg(bool Value = true) {
2223 CallLoweringInfo &setTailCall(bool Value = true) {
2228 CallLoweringInfo &setDiscardResult(bool Value = true) {
2229 IsReturnValueUsed = !Value;
2233 CallLoweringInfo &setSExtResult(bool Value = true) {
2238 CallLoweringInfo &setZExtResult(bool Value = true) {
2243 ArgListTy &getArgs() {
2248 /// This function lowers an abstract call to a function into an actual call.
2249 /// This returns a pair of operands. The first element is the return value
2250 /// for the function (if RetTy is not VoidTy). The second element is the
2251 /// outgoing token chain. It calls LowerCall to do the actual lowering.
2252 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
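/// Illustrative sketch of building a call through this interface (DAG,
/// Chain, dl, RetTy, Callee, and the argument list are assumed to exist in
/// the caller; they are not provided by this class):
/// \code
///   TargetLowering::ArgListTy Args;      // assume already populated
///   TargetLowering::CallLoweringInfo CLI(DAG);
///   CLI.setDebugLoc(dl).setChain(Chain)
///      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
///      .setDiscardResult();
///   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
/// \endcode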
2254 /// This hook must be implemented to lower calls into the specified
2255 /// DAG. The outgoing arguments to the call are described by the Outs array,
2256 /// and the values to be returned by the call are described by the Ins
2257 /// array. The implementation should fill in the InVals array with legal-type
2258 /// return values from the call, and return the resulting token chain value.
2260 LowerCall(CallLoweringInfo &/*CLI*/,
2261 SmallVectorImpl<SDValue> &/*InVals*/) const {
2262 llvm_unreachable("Not Implemented");
2265 /// Target-specific cleanup for formal ByVal parameters.
2266 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2268 /// This hook should be implemented to check whether the return values
2269 /// described by the Outs array can fit into the return registers. If false
2270 /// is returned, an sret-demotion is performed.
2271 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2272 MachineFunction &/*MF*/, bool /*isVarArg*/,
2273 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2274 LLVMContext &/*Context*/) const
2276 // Return true by default to get preexisting behavior.
2280 /// This hook must be implemented to lower outgoing return values, described
2281 /// by the Outs array, into the specified DAG. The implementation should
2282 /// return the resulting token chain value.
2284 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2286 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2287 const SmallVectorImpl<SDValue> &/*OutVals*/,
2288 SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2289 llvm_unreachable("Not Implemented");
2292 /// Return true if the result of the specified node is used by a return node
2293 /// only. It also computes and returns the input chain for the tail call.
2295 /// This is used to determine whether it is possible to codegen a libcall as
2296 /// tail call at legalization time.
2297 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2301 /// Return true if the target may be able to emit the call instruction as a tail
2302 /// call. This is used by optimization passes to determine if it's profitable
2303 /// to duplicate return instructions to enable tailcall optimization.
2304 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2308 /// Return the builtin name for the __builtin___clear_cache intrinsic.
2309 /// The default is to invoke the clear cache library call.
2310 virtual const char * getClearCacheBuiltinName() const {
2311 return "__clear_cache";
2314 /// Return the register ID of the name passed in. Used by the named register
2315 /// global variables extension. There is no target-independent behavior, so
2316 /// the default action is to report a fatal error.
2317 virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
2318 report_fatal_error("Named registers not implemented for this target");
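/// A hypothetical override (the target, register name, and register below
/// are assumptions used only for illustration):
/// \code
///   unsigned MyTargetLowering::getRegisterByName(const char *RegName,
///                                                EVT VT) const {
///     if (StringRef(RegName) == "sp")
///       return MyTarget::SP;            // assumed physical register
///     report_fatal_error("Invalid register name global variable");
///   }
/// \endcode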
2321 /// Return the type that should be used to zero or sign extend a
2322 /// zeroext/signext integer argument or return value. FIXME: Most C calling
2323 /// conventions require the return type to be promoted, but this is not true
2324 /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2325 /// calling conventions. The frontend should handle this and include all of
2326 /// the necessary information.
2327 virtual MVT getTypeForExtArgOrReturn(MVT VT,
2328 ISD::NodeType /*ExtendKind*/) const {
2329 MVT MinVT = getRegisterType(MVT::i32);
2330 return VT.bitsLT(MinVT) ? MinVT : VT;
2333 /// For some targets, an LLVM struct type must be broken down into multiple
2334 /// simple types, but the calling convention specifies that the entire struct
2335 /// must be passed in a block of consecutive registers.
2337 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
2338 bool isVarArg) const {
2342 /// Returns a 0 terminated array of registers that can be safely used as
2343 /// scratch registers.
2344 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2348 /// This callback is used to prepare for a volatile or atomic load.
2349 /// It takes a chain node as input and returns the chain for the load itself.
2351 /// Having a callback like this is necessary for targets like SystemZ,
2352 /// which allows a CPU to reuse the result of a previous load indefinitely,
2353 /// even if a cache-coherent store is performed by another CPU. The default
2354 /// implementation does nothing.
2355 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
2356 SelectionDAG &DAG) const {
2360 /// This callback is invoked by the type legalizer to legalize nodes with an
2361 /// illegal operand type but legal result types. It replaces the
2362 /// LowerOperation callback in the type Legalizer. The reason we can not do
2363 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2364 /// use this callback.
2366 /// TODO: Consider merging with ReplaceNodeResults.
2368 /// The target places new result values for the node in Results (their number
2369 /// and types must exactly match those of the original return values of
2370 /// the node), or leaves Results empty, which indicates that the node is not
2371 /// to be custom lowered after all.
2372 /// The default implementation calls LowerOperation.
2373 virtual void LowerOperationWrapper(SDNode *N,
2374 SmallVectorImpl<SDValue> &Results,
2375 SelectionDAG &DAG) const;
2377 /// This callback is invoked for operations that are unsupported by the
2378 /// target, which are registered to use 'custom' lowering, and whose defined
2379 /// values are all legal. If the target has no operations that require custom
2380 /// lowering, it need not implement this. The default implementation of this aborts.
2382 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2384 /// This callback is invoked when a node result type is illegal for the
2385 /// target, and the operation was registered to use 'custom' lowering for that
2386 /// result type. The target places new result values for the node in Results
2387 /// (their number and types must exactly match those of the original return
2388 /// values of the node), or leaves Results empty, which indicates that the
2389 /// node is not to be custom lowered after all.
2391 /// If the target has no operations that require custom lowering, it need not
2392 /// implement this. The default implementation aborts.
2393 virtual void ReplaceNodeResults(SDNode * /*N*/,
2394 SmallVectorImpl<SDValue> &/*Results*/,
2395 SelectionDAG &/*DAG*/) const {
2396 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2399 /// This method returns the name of a target specific DAG node.
2400 virtual const char *getTargetNodeName(unsigned Opcode) const;
2402 /// This method returns a target specific FastISel object, or null if the
2403 /// target does not support "fast" ISel.
2404 virtual FastISel *createFastISel(FunctionLoweringInfo &,
2405 const TargetLibraryInfo *) const {
2410 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
2411 SelectionDAG &DAG) const;
2413 //===--------------------------------------------------------------------===//
2414 // Inline Asm Support hooks
2417 /// This hook allows the target to expand an inline asm call to be explicit
2418 /// llvm code if it wants to. This is useful for turning simple inline asms
2419 /// into LLVM intrinsics, which gives the compiler more information about the
2420 /// behavior of the code.
2421 virtual bool ExpandInlineAsm(CallInst *) const {
2425 enum ConstraintType {
2426 C_Register, // Constraint represents specific register(s).
2427 C_RegisterClass, // Constraint represents any of register(s) in class.
2428 C_Memory, // Memory constraint.
2429 C_Other, // Something else.
2430 C_Unknown // Unsupported constraint.
2433 enum ConstraintWeight {
2435 CW_Invalid = -1, // No match.
2436 CW_Okay = 0, // Acceptable.
2437 CW_Good = 1, // Good weight.
2438 CW_Better = 2, // Better weight.
2439 CW_Best = 3, // Best weight.
2441 // Well-known weights.
2442 CW_SpecificReg = CW_Okay, // Specific register operands.
2443 CW_Register = CW_Good, // Register operands.
2444 CW_Memory = CW_Better, // Memory operands.
2445 CW_Constant = CW_Best, // Constant operand.
2446 CW_Default = CW_Okay // Default or don't know type.
2449 /// This contains information for each constraint that we are lowering.
2450 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
2451 /// This contains the actual string for the code, like "m". TargetLowering
2452 /// picks the 'best' code from ConstraintInfo::Codes that most closely
2453 /// matches the operand.
2454 std::string ConstraintCode;
2456 /// Information about the constraint code, e.g. Register, RegisterClass,
2457 /// Memory, Other, Unknown.
2458 TargetLowering::ConstraintType ConstraintType;
2460 /// If this is the result output operand or a clobber, this is null,
2461 /// otherwise it is the incoming operand to the CallInst. This gets
2462 /// modified as the asm is processed.
2463 Value *CallOperandVal;
2465 /// The ValueType for the operand value.
2468 /// Return true if this is an input operand that is a matching constraint.
2470 bool isMatchingInputConstraint() const;
2472 /// If this is an input matching constraint, this method returns the output
2473 /// operand it matches.
2474 unsigned getMatchedOperand() const;
2476 /// Copy constructor for copying from a ConstraintInfo.
2477 AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
2478 : InlineAsm::ConstraintInfo(info),
2479 ConstraintType(TargetLowering::C_Unknown),
2480 CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
2484 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2486 /// Split up the constraint string from the inline assembly value into the
2487 /// specific constraints and their prefixes, and also tie in the associated
2488 /// operand values. If this returns an empty vector, and if the constraint
2489 /// string itself isn't empty, there was an error parsing.
2490 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
2492 /// Examine constraint type and operand type and determine a weight value.
2493 /// The operand object must already have been set up with the operand type.
2494 virtual ConstraintWeight getMultipleConstraintMatchWeight(
2495 AsmOperandInfo &info, int maIndex) const;
2497 /// Examine constraint string and operand type and determine a weight value.
2498 /// The operand object must already have been set up with the operand type.
2499 virtual ConstraintWeight getSingleConstraintMatchWeight(
2500 AsmOperandInfo &info, const char *constraint) const;
2502 /// Determines the constraint code and constraint type to use for the specific
2503 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2504 /// If the actual operand being passed in is available, it can be passed in as
2505 /// Op, otherwise an empty SDValue can be passed.
2506 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2508 SelectionDAG *DAG = nullptr) const;
2510 /// Given a constraint, return the type of constraint it is for this target.
2511 virtual ConstraintType getConstraintType(const std::string &Constraint) const;
2513 /// Given a physical register constraint (e.g. {edx}), return the register
2514 /// number and the register class for the register.
2516 /// Given a register class constraint, like 'r', if this corresponds directly
2517 /// to an LLVM register class, return a register of 0 and the register class to use.
2520 /// This should only be used for C_Register constraints. On error, this
2521 /// returns a register number of 0 and a null register class pointer.
2522 virtual std::pair<unsigned, const TargetRegisterClass*>
2523 getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
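/// Illustrative only (the target and register class below are assumptions):
/// a target might map the 'r' constraint to its general-purpose class and
/// defer physical-register constraints like "{edx}" to the default
/// implementation:
/// \code
///   std::pair<unsigned, const TargetRegisterClass*>
///   MyTargetLowering::getRegForInlineAsmConstraint(
///       const std::string &Constraint, MVT VT) const {
///     if (Constraint == "r")
///       return std::make_pair(0U, &MyTarget::GPRRegClass); // class, no reg
///     return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
///   }
/// \endcode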
2526 /// Try to replace an X constraint, which matches anything, with another that
2527 /// has more specific requirements based on the type of the corresponding
2528 /// operand. This returns null if there is no replacement to make.
2529 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2531 /// Lower the specified operand into the Ops vector. If it is invalid, don't
2532 /// add anything to Ops.
2533 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2534 std::vector<SDValue> &Ops,
2535 SelectionDAG &DAG) const;
2537 //===--------------------------------------------------------------------===//
2538 // Div utility functions
2540 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
2541 SelectionDAG &DAG) const;
2542 SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2543 bool IsAfterLegalization,
2544 std::vector<SDNode *> *Created) const;
2545 SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2546 bool IsAfterLegalization,
2547 std::vector<SDNode *> *Created) const;
2549 //===--------------------------------------------------------------------===//
2550 // Legalization utility functions
2553 /// Expand a MUL into two nodes. One that computes the high bits of
2554 /// the result and one that computes the low bits.
2555 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
2556 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
2557 /// if you want to control how low bits are extracted from the LHS.
2558 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
2559 /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
2560 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
2561 /// \returns true if the node has been expanded, false if it has not.
2562 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
2563 SelectionDAG &DAG, SDValue LL = SDValue(),
2564 SDValue LH = SDValue(), SDValue RL = SDValue(),
2565 SDValue RH = SDValue()) const;
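/// Illustrative use (the node, types, and surrounding variables are
/// assumed): a target replacing an illegal i64 multiply might do
/// \code
///   SDValue Lo, Hi;
///   if (expandMUL(N, Lo, Hi, MVT::i32, DAG))
///     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi));
/// \endcode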
2567 //===--------------------------------------------------------------------===//
2568 // Instruction Emitting Hooks
2571 /// This method should be implemented by targets that mark instructions with
2572 /// the 'usesCustomInserter' flag. These instructions are special in various
2573 /// ways, which require special support to insert. The specified MachineInstr
2574 /// is created but not inserted into any basic blocks, and this method is
2575 /// called to expand it into a sequence of instructions, potentially also
2576 /// creating new basic blocks and control flow.
2577 virtual MachineBasicBlock *
2578 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
2580 /// This method should be implemented by targets that mark instructions with
2581 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
2582 /// instruction selection by target hooks. e.g. to fill in optional defs for
2583 /// ARM 's' setting instructions.
2585 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
2588 /// Given an LLVM IR type and return type attributes, compute the return value
2589 /// EVTs and flags, and optionally also the offsets, if the return value is
2590 /// being lowered to memory.
2591 void GetReturnInfo(Type* ReturnType, AttributeSet attr,
2592 SmallVectorImpl<ISD::OutputArg> &Outs,
2593 const TargetLowering &TLI);
2595 } // end llvm namespace