1 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes how to lower LLVM code to machine code. This has
11 // three main components:
13 // 1. Which ValueTypes are natively supported by the target.
14 // 2. Which operations are supported for supported ValueTypes.
15 // 3. Cost thresholds for alternative implementations of certain operations.
17 // In addition it has a few other components, like information about FP
18 // immediates, etc.
20 //===----------------------------------------------------------------------===//
22 #ifndef LLVM_TARGET_TARGETLOWERING_H
23 #define LLVM_TARGET_TARGETLOWERING_H
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/AddressingMode.h"
27 #include "llvm/Attributes.h"
28 #include "llvm/CallingConv.h"
29 #include "llvm/CodeGen/RuntimeLibcalls.h"
30 #include "llvm/CodeGen/SelectionDAGNodes.h"
31 #include "llvm/InlineAsm.h"
32 #include "llvm/Support/CallSite.h"
33 #include "llvm/Support/DebugLoc.h"
34 #include "llvm/Target/TargetCallingConv.h"
35 #include "llvm/Target/TargetMachine.h"
44 class FunctionLoweringInfo;
45 class ImmutableCallSite;
47 class MachineBasicBlock;
48 class MachineFunction;
50 class MachineJumpTableInfo;
53 template<typename T> class SmallVectorImpl;
55 class TargetRegisterClass;
56 class TargetLibraryInfo;
57 class TargetLoweringObjectFile;
62 None, // No preference
63 Source, // Follow source order.
64 RegPressure, // Scheduling for lowest register pressure.
65 Hybrid, // Scheduling for both latency and register pressure.
66 ILP, // Scheduling for ILP in low register pressure mode.
67 VLIW // Scheduling for VLIW targets.
72 //===----------------------------------------------------------------------===//
73 /// TargetLowering - This class defines information used to lower LLVM code to
74 /// legal SelectionDAG operators that the target instruction selector can accept
77 /// This class also defines callbacks that targets must implement to lower
78 /// target-specific constructs to SelectionDAG operators.
80 class TargetLowering {
81 TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
82 void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
84 /// LegalizeAction - This enum indicates whether operations are valid for a
85 /// target, and if not, what action should be used to make them valid.
87 Legal, // The target natively supports this operation.
88 Promote, // This operation should be executed in a larger type.
89 Expand, // Try to expand this to other ops, otherwise use a libcall.
90 Custom // Use the LowerOperation hook to implement custom lowering.
93 /// LegalizeTypeAction - This enum indicates whether types are legal for a
94 /// target, and if not, what action should be used to make them valid.
95 enum LegalizeTypeAction {
96 TypeLegal, // The target natively supports this type.
97 TypePromoteInteger, // Replace this integer with a larger one.
98 TypeExpandInteger, // Split this integer into two of half the size.
99 TypeSoftenFloat, // Convert this float to a same size integer type.
100 TypeExpandFloat, // Split this float into two of half the size.
101 TypeScalarizeVector, // Replace this one-element vector with its element.
102 TypeSplitVector, // Split this vector into two of half the size.
103 TypeWidenVector // This vector should be widened into a larger vector.
106 /// LegalizeKind holds the legalization kind that needs to happen to an EVT
107 /// in order to type-legalize it.
108 typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
110 enum BooleanContent { // How the target represents true/false values.
111 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
112 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
113 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
116 enum SelectSupportKind {
117 ScalarValSelect, // The target supports scalar selects (ex: cmov).
118 ScalarCondVectorVal, // The target supports selects with a scalar condition
119 // and vector values (ex: cmov).
120 VectorMaskSelect // The target supports vector selects with a vector
121 // mask (ex: x86 blends).
124 static ISD::NodeType getExtendForContent(BooleanContent Content) {
126 case UndefinedBooleanContent:
127 // Extend by adding rubbish bits.
128 return ISD::ANY_EXTEND;
129 case ZeroOrOneBooleanContent:
130 // Extend by adding zero bits.
131 return ISD::ZERO_EXTEND;
132 case ZeroOrNegativeOneBooleanContent:
133 // Extend by copying the sign bit.
134 return ISD::SIGN_EXTEND;
136 llvm_unreachable("Invalid content kind");
139 /// NOTE: The constructor takes ownership of TLOF.
140 explicit TargetLowering(const TargetMachine &TM,
141 const TargetLoweringObjectFile *TLOF);
142 virtual ~TargetLowering();
144 const TargetMachine &getTargetMachine() const { return TM; }
145 const DataLayout *getDataLayout() const { return TD; }
146 const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
148 bool isBigEndian() const { return !IsLittleEndian; }
149 bool isLittleEndian() const { return IsLittleEndian; }
150 // Return the pointer type for the given address space, defaults to
151 // the pointer type from the data layout.
152 // FIXME: The default needs to be removed once all the code is updated.
153 virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
154 virtual MVT getShiftAmountTy(EVT LHSTy) const;
156 /// isSelectExpensive - Return true if the select operation is expensive for
158 bool isSelectExpensive() const { return SelectIsExpensive; }
160 virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
162 /// shouldSplitVectorElementType - Return true if a vector of the given type
163 /// should be split (TypeSplitVector) instead of promoted
164 /// (TypePromoteInteger) during type legalization.
165 virtual bool shouldSplitVectorElementType(EVT VT) const { return false; }
167 /// isIntDivCheap() - Return true if integer divide is usually cheaper than
168 /// a sequence of several shifts, adds, and multiplies for this target.
169 bool isIntDivCheap() const { return IntDivIsCheap; }
171 /// isSlowDivBypassed - Returns true if target has indicated at least one
172 /// type should be bypassed.
173 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
175 /// getBypassSlowDivWidths - Returns the map of slow integer widths for
176 /// division or remainder to the corresponding fast widths.
177 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
178 return BypassSlowDivWidths;
181 /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
183 bool isPow2DivCheap() const { return Pow2DivIsCheap; }
185 /// isJumpExpensive() - Return true if Flow Control is an expensive operation
186 /// that should be avoided.
187 bool isJumpExpensive() const { return JumpIsExpensive; }
189 /// isPredictableSelectExpensive - Return true if selects are only cheaper
190 /// than branches if the branch is unlikely to be predicted right.
191 bool isPredictableSelectExpensive() const {
192 return predictableSelectIsExpensive;
195 /// getSetCCResultType - Return the ValueType of the result of SETCC
196 /// operations. Also used to obtain the target's preferred type for
197 /// the condition operand of SELECT and BRCOND nodes. In the case of
198 /// BRCOND the argument passed is MVT::Other since there are no other
199 /// operands to get a type hint from.
200 virtual EVT getSetCCResultType(EVT VT) const;
202 /// getCmpLibcallReturnType - Return the ValueType for comparison
203 /// libcalls. Comparison libcalls include floating point comparison calls,
204 /// and Ordered/Unordered check calls on floating point numbers.
206 MVT::SimpleValueType getCmpLibcallReturnType() const;
208 /// getBooleanContents - For targets without i1 registers, this gives the
209 /// nature of the high-bits of boolean values held in types wider than i1.
210 /// "Boolean values" are special true/false values produced by nodes like
211 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
212 /// Not to be confused with general values promoted from i1.
213 /// Some CPUs distinguish between vectors of booleans and scalars; the isVec
214 /// parameter selects between the two kinds. For example on X86 a scalar
215 /// boolean should be zero extended from i1, while the elements of a vector
216 /// of booleans should be sign extended from i1.
217 BooleanContent getBooleanContents(bool isVec) const {
218 return isVec ? BooleanVectorContents : BooleanContents;
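/// For illustration, a client that needs to widen a SETCC-style boolean can
/// derive the extension opcode from the boolean contents (a minimal sketch;
/// TLI stands for a TargetLowering reference and is not defined here):
/// \code
///   ISD::NodeType ExtOp =
///       TargetLowering::getExtendForContent(TLI.getBooleanContents(false));
///   // ZeroOrOneBooleanContent yields ZERO_EXTEND,
///   // ZeroOrNegativeOneBooleanContent yields SIGN_EXTEND.
/// \endcode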
221 /// getSchedulingPreference - Return target scheduling preference.
222 Sched::Preference getSchedulingPreference() const {
223 return SchedPreferenceInfo;
226 /// getSchedulingPreference - Some schedulers, e.g. hybrid, can switch to
227 /// different scheduling heuristics for different nodes. This function returns
228 /// the preference (or none) for the given node.
229 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
233 /// getRegClassFor - Return the register class that should be used for the
234 /// specified value type.
235 virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
236 assert(VT.isSimple() && "getRegClassFor called on illegal type!");
237 const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
238 assert(RC && "This value type is not natively supported!");
242 /// getRepRegClassFor - Return the 'representative' register class for the
243 /// specified value type. The 'representative' register class is the largest
244 /// legal super-reg register class for the register class of the value type.
245 /// For example, on i386 the rep register class for i8, i16, and i32 is GR32,
246 /// while on x86_64 it is GR64.
247 virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
248 assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
249 const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
253 /// getRepRegClassCostFor - Return the cost of the 'representative' register
254 /// class for the specified value type.
255 virtual uint8_t getRepRegClassCostFor(EVT VT) const {
256 assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
257 return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
260 /// isTypeLegal - Return true if the target has native support for the
261 /// specified value type. This means that it has a register that directly
262 /// holds it without promotions or expansions.
263 bool isTypeLegal(EVT VT) const {
264 assert(!VT.isSimple() ||
265 (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
266 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
269 class ValueTypeActionImpl {
270 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
271 /// that indicates how instruction selection should deal with the type.
272 uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
275 ValueTypeActionImpl() {
276 std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
279 LegalizeTypeAction getTypeAction(MVT VT) const {
280 return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
283 void setTypeAction(EVT VT, LegalizeTypeAction Action) {
284 unsigned I = VT.getSimpleVT().SimpleTy;
285 ValueTypeActions[I] = Action;
289 const ValueTypeActionImpl &getValueTypeActions() const {
290 return ValueTypeActions;
293 /// getTypeAction - Return how we should legalize values of this type, either
294 /// it is already legal (return 'Legal') or we need to promote it to a larger
295 /// type (return 'Promote'), or we need to expand it into multiple registers
296 /// of smaller integer type (return 'Expand'). 'Custom' is not an option.
297 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
298 return getTypeConversion(Context, VT).first;
300 LegalizeTypeAction getTypeAction(MVT VT) const {
301 return ValueTypeActions.getTypeAction(VT);
304 /// getTypeToTransformTo - For types supported by the target, this is an
305 /// identity function. For types that must be promoted to larger types, this
306 /// returns the larger type to promote to. For integer types that are larger
307 /// than the largest integer register, this contains one step in the expansion
308 /// to get to the smaller register. For illegal floating point types, this
309 /// returns the integer type to transform to.
310 EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
311 return getTypeConversion(Context, VT).second;
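/// For example, a client can walk a type down to a legal one with these two
/// hooks (a sketch; TLI and Ctx stand for a TargetLowering reference and an
/// LLVMContext, and i64 is assumed to be illegal on the target):
/// \code
///   EVT VT = MVT::i64;
///   while (TLI.getTypeAction(Ctx, VT) != TargetLowering::TypeLegal)
///     VT = TLI.getTypeToTransformTo(Ctx, VT); // e.g. i64 -> i32
/// \endcode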
314 /// getTypeToExpandTo - For types supported by the target, this is an
315 /// identity function. For types that must be expanded (i.e. integer types
316 /// that are larger than the largest integer register or illegal floating
317 /// point types), this returns the largest legal type it will be expanded to.
318 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
319 assert(!VT.isVector());
321 switch (getTypeAction(Context, VT)) {
324 case TypeExpandInteger:
325 VT = getTypeToTransformTo(Context, VT);
328 llvm_unreachable("Type is not legal nor is it to be expanded!");
333 /// getVectorTypeBreakdown - Vector types are broken down into some number of
334 /// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32
335 /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack.
336 /// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86.
338 /// This method returns the number of registers needed, and the VT for each
339 /// register. It also returns the VT and quantity of the intermediate values
340 /// before they are promoted/expanded.
342 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
344 unsigned &NumIntermediates,
345 EVT &RegisterVT) const;
347 /// getTgtMemIntrinsic: Given an intrinsic, checks whether on the target the
348 /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
349 /// this is the case, it returns true and stores the intrinsic
350 /// information into the IntrinsicInfo that was passed to the function.
351 struct IntrinsicInfo {
352 unsigned opc; // target opcode
353 EVT memVT; // memory VT
354 const Value* ptrVal; // value representing memory location
355 int offset; // offset off of ptrVal
356 unsigned align; // alignment
357 bool vol; // is volatile?
358 bool readMem; // reads memory?
359 bool writeMem; // writes memory?
362 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
363 unsigned /*Intrinsic*/) const {
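/// A target override might fill in the struct for one of its own intrinsics
/// roughly as follows (a sketch; Info and I are hypothetical names for the
/// IntrinsicInfo and CallInst parameters, and the values are illustrative):
/// \code
///   Info.opc = ISD::INTRINSIC_W_CHAIN;
///   Info.memVT = MVT::i32;              // memory value type
///   Info.ptrVal = I.getArgOperand(0);   // pointer operand of the intrinsic
///   Info.offset = 0;
///   Info.align = 4;
///   Info.vol = false;
///   Info.readMem = true;
///   Info.writeMem = false;
///   return true;
/// \endcode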
367 /// isFPImmLegal - Returns true if the target can instruction select the
368 /// specified FP immediate natively. If false, the legalizer will materialize
369 /// the FP immediate as a load from a constant pool.
370 virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
374 /// isShuffleMaskLegal - Targets can use this to indicate that they only
375 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
376 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
377 /// are assumed to be legal.
378 virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
383 /// canOpTrap - Returns true if the operation can trap for the value type.
384 /// VT must be a legal type. By default, we optimistically assume most
385 /// operations don't trap except for divide and remainder.
386 virtual bool canOpTrap(unsigned Op, EVT VT) const;
388 /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
389 /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
390 /// to replace a VAND with a constant
392 virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
397 /// getOperationAction - Return how this operation should be treated: either
398 /// it is legal, needs to be promoted to a larger size, needs to be
399 /// expanded to some other code sequence, or the target has a custom expander
401 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
402 if (VT.isExtended()) return Expand;
403 // If a target-specific SDNode requires legalization, require the target
404 // to provide custom legalization for it.
405 if (Op >= array_lengthof(OpActions[0])) return Custom;
406 unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
407 return (LegalizeAction)OpActions[I][Op];
410 /// isOperationLegalOrCustom - Return true if the specified operation is
411 /// legal on this target or can be made legal with custom lowering. This
412 /// is used to help guide high-level lowering decisions.
413 bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
414 return (VT == MVT::Other || isTypeLegal(VT)) &&
415 (getOperationAction(Op, VT) == Legal ||
416 getOperationAction(Op, VT) == Custom);
419 /// isOperationExpand - Return true if the specified operation is illegal on
420 /// this target or unlikely to be made legal with custom lowering. This is
421 /// used to help guide high-level lowering decisions.
422 bool isOperationExpand(unsigned Op, EVT VT) const {
423 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
426 /// isOperationLegal - Return true if the specified operation is legal on this
428 bool isOperationLegal(unsigned Op, EVT VT) const {
429 return (VT == MVT::Other || isTypeLegal(VT)) &&
430 getOperationAction(Op, VT) == Legal;
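/// For example, a DAG combine that wants to form a multiply-high node would
/// typically guard itself with one of these queries (a sketch; TLI stands for
/// a TargetLowering reference):
/// \code
///   if (!TLI.isOperationLegalOrCustom(ISD::MULHS, MVT::i32))
///     return SDValue(); // leave the node for the generic expansion
/// \endcode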
433 /// getLoadExtAction - Return how this load with extension should be treated:
434 /// either it is legal, needs to be promoted to a larger size, needs to be
435 /// expanded to some other code sequence, or the target has a custom expander
437 LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
438 assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
439 VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
440 "Table isn't big enough!");
441 return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
444 /// isLoadExtLegal - Return true if the specified load with extension is legal
446 bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
447 return VT.isSimple() && getLoadExtAction(ExtType, VT) == Legal;
450 /// getTruncStoreAction - Return how this store with truncation should be
451 /// treated: either it is legal, needs to be promoted to a larger size, needs
452 /// to be expanded to some other code sequence, or the target has a custom
454 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
455 assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
456 MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
457 "Table isn't big enough!");
458 return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
459 [MemVT.getSimpleVT().SimpleTy];
462 /// isTruncStoreLegal - Return true if the specified store with truncation is
463 /// legal on this target.
464 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
465 return isTypeLegal(ValVT) && MemVT.isSimple() &&
466 getTruncStoreAction(ValVT, MemVT) == Legal;
469 /// getIndexedLoadAction - Return how the indexed load should be treated:
470 /// either it is legal, needs to be promoted to a larger size, needs to be
471 /// expanded to some other code sequence, or the target has a custom expander
474 getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
475 assert(IdxMode < ISD::LAST_INDEXED_MODE &&
476 VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
477 "Table isn't big enough!");
478 unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
479 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
482 /// isIndexedLoadLegal - Return true if the specified indexed load is legal
484 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
485 return VT.isSimple() &&
486 (getIndexedLoadAction(IdxMode, VT) == Legal ||
487 getIndexedLoadAction(IdxMode, VT) == Custom);
490 /// getIndexedStoreAction - Return how the indexed store should be treated:
491 /// either it is legal, needs to be promoted to a larger size, needs to be
492 /// expanded to some other code sequence, or the target has a custom expander
495 getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
496 assert(IdxMode < ISD::LAST_INDEXED_MODE &&
497 VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
498 "Table isn't big enough!");
499 unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
500 return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
503 /// isIndexedStoreLegal - Return true if the specified indexed store is legal
505 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
506 return VT.isSimple() &&
507 (getIndexedStoreAction(IdxMode, VT) == Legal ||
508 getIndexedStoreAction(IdxMode, VT) == Custom);
511 /// getCondCodeAction - Return how the condition code should be treated:
512 /// either it is legal, needs to be expanded to some other code sequence,
513 /// or the target has a custom expander for it.
515 getCondCodeAction(ISD::CondCode CC, EVT VT) const {
516 assert((unsigned)CC < array_lengthof(CondCodeActions) &&
517 (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
518 "Table isn't big enough!");
519 /// The lower 5 bits of the SimpleTy select the Nth 2-bit field within a
520 /// 64-bit value, and the remaining upper bits index into the second
521 /// dimension of the array to select which 64-bit value to use.
522 LegalizeAction Action = (LegalizeAction)
523 ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5]
524 >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3);
525 assert(Action != Promote && "Can't promote condition code!");
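// Worked example of the packing: a SimpleTy of 37 selects 64-bit word
// 37 >> 5 == 1, and within it the two bits starting at 2 * (37 & 0x1F) == 10.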
529 /// isCondCodeLegal - Return true if the specified condition code is legal
531 bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const {
532 return getCondCodeAction(CC, VT) == Legal ||
533 getCondCodeAction(CC, VT) == Custom;
537 /// getTypeToPromoteTo - If the action for this operation is to promote, this
538 /// method returns the ValueType to promote to.
539 EVT getTypeToPromoteTo(unsigned Op, EVT VT) const {
540 assert(getOperationAction(Op, VT) == Promote &&
541 "This operation isn't promoted!");
543 // See if this has an explicit type specified.
544 std::map<std::pair<unsigned, MVT::SimpleValueType>,
545 MVT::SimpleValueType>::const_iterator PTTI =
546 PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy));
547 if (PTTI != PromoteToType.end()) return PTTI->second;
549 assert((VT.isInteger() || VT.isFloatingPoint()) &&
550 "Cannot autopromote this type, add it with AddPromotedToType.");
554 NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
555 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
556 "Didn't find type to promote to!");
557 } while (!isTypeLegal(NVT) ||
558 getOperationAction(Op, NVT) == Promote);
562 /// getValueType - Return the EVT corresponding to this LLVM type.
563 /// This is fixed by the LLVM operations except for the pointer size. If
564 /// AllowUnknown is true, this will return MVT::Other for types with no EVT
565 /// counterpart (e.g. structs), otherwise it will assert.
566 EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
567 // Lower scalar pointers to native pointer types.
568 if (Ty->isPointerTy()) return PointerTy;
570 if (Ty->isVectorTy()) {
571 VectorType *VTy = cast<VectorType>(Ty);
572 Type *Elm = VTy->getElementType();
573 // Lower vectors of pointers to native pointer types.
574 if (Elm->isPointerTy())
575 Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext());
576 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
577 VTy->getNumElements());
579 return EVT::getEVT(Ty, AllowUnknown);
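/// For example (a sketch; TLI and Ctx stand for a TargetLowering reference and
/// an LLVMContext):
/// \code
///   EVT VT = TLI.getValueType(Type::getInt32Ty(Ctx)); // MVT::i32
///   EVT PT = TLI.getValueType(PointerType::getUnqual(Type::getInt8Ty(Ctx)));
///   // PT is the target's PointerTy, handled by the isPointerTy() case above.
/// \endcode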
583 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
584 /// function arguments in the caller parameter area. This is the actual
585 /// alignment, not its logarithm.
586 virtual unsigned getByValTypeAlignment(Type *Ty) const;
588 /// getRegisterType - Return the type of registers that this ValueType will
589 /// eventually require.
590 EVT getRegisterType(MVT VT) const {
591 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
592 return RegisterTypeForVT[VT.SimpleTy];
595 /// getRegisterType - Return the type of registers that this ValueType will
596 /// eventually require.
597 EVT getRegisterType(LLVMContext &Context, EVT VT) const {
599 assert((unsigned)VT.getSimpleVT().SimpleTy <
600 array_lengthof(RegisterTypeForVT));
601 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
605 unsigned NumIntermediates;
606 (void)getVectorTypeBreakdown(Context, VT, VT1,
607 NumIntermediates, RegisterVT);
610 if (VT.isInteger()) {
611 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
613 llvm_unreachable("Unsupported extended type!");
616 /// getNumRegisters - Return the number of registers that this ValueType will
617 /// eventually require. This is one for any types promoted to live in larger
618 /// registers, but may be more than one for types (like i64) that are split
619 /// into pieces. For types like i140, which are first promoted then expanded,
620 /// it is the number of registers needed to hold all the bits of the original
621 /// type. For an i140 on a 32 bit machine this means 5 registers.
622 unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
624 assert((unsigned)VT.getSimpleVT().SimpleTy <
625 array_lengthof(NumRegistersForVT));
626 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
630 unsigned NumIntermediates;
631 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
633 if (VT.isInteger()) {
634 unsigned BitWidth = VT.getSizeInBits();
635 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
636 return (BitWidth + RegWidth - 1) / RegWidth;
638 llvm_unreachable("Unsupported extended type!");
641 /// ShouldShrinkFPConstant - If true, then instruction selection should
642 /// seek to shrink the FP constant of the specified type to a smaller type
643 /// in order to save space and / or reduce runtime.
644 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
646 /// hasTargetDAGCombine - If true, the target has custom DAG combine
647 /// transformations that it can perform for the specified node.
648 bool hasTargetDAGCombine(ISD::NodeType NT) const {
649 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
650 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
653 /// This function returns the maximum number of store operations permitted
654 /// to replace a call to llvm.memset. The value is set by the target at the
655 /// performance threshold for such a replacement. If OptSize is true,
656 /// return the limit for functions that have OptSize attribute.
657 /// @brief Get maximum # of store operations permitted for llvm.memset
658 unsigned getMaxStoresPerMemset(bool OptSize) const {
659 return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset;
662 /// This function returns the maximum number of store operations permitted
663 /// to replace a call to llvm.memcpy. The value is set by the target at the
664 /// performance threshold for such a replacement. If OptSize is true,
665 /// return the limit for functions that have OptSize attribute.
666 /// @brief Get maximum # of store operations permitted for llvm.memcpy
667 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
668 return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
671 /// This function returns the maximum number of store operations permitted
672 /// to replace a call to llvm.memmove. The value is set by the target at the
673 /// performance threshold for such a replacement. If OptSize is true,
674 /// return the limit for functions that have OptSize attribute.
675 /// @brief Get maximum # of store operations permitted for llvm.memmove
676 unsigned getMaxStoresPerMemmove(bool OptSize) const {
677 return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
680 /// This function returns true if the target allows unaligned memory accesses
681 /// of the specified type. If true, it also returns whether the unaligned
682 /// memory access is "fast" in the second argument by reference. This is used,
683 /// for example, in situations where an array copy/move/set is converted to a
684 /// sequence of store operations. Its use helps to ensure that such
685 /// replacements don't generate code that causes an alignment error (trap) on
686 /// the target machine.
687 /// @brief Determine if the target supports unaligned memory accesses.
688 virtual bool allowsUnalignedMemoryAccesses(EVT, bool *Fast = 0) const {
692 /// This function returns true if the target would benefit from code placement
694 /// @brief Determine if the target should perform code placement optimization.
695 bool shouldOptimizeCodePlacement() const {
696 return benefitFromCodePlacementOpt;
699 /// getOptimalMemOpType - Returns the target specific optimal type for load
700 /// and store operations as a result of memset, memcpy, and memmove
701 /// lowering. If DstAlign is zero, the destination alignment can be assumed
702 /// to satisfy any constraint. Similarly, if SrcAlign is zero there is no
703 /// need to check it against an alignment requirement, probably because the
704 /// source does not need to be loaded. If
705 /// 'IsZeroVal' is true, that means it's safe to return a
706 /// non-scalar-integer type, e.g. empty string source, constant, or loaded
707 /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
708 /// constant so it does not need to be loaded.
709 /// It returns EVT::Other if the type should be determined using generic
710 /// target-independent logic.
711 virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
712 unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
714 bool /*MemcpyStrSrc*/,
715 MachineFunction &/*MF*/) const {
719 /// isLegalMemOpType - Returns true if it's legal to use load / store of the
720 /// specified type to expand memcpy / memset inline. This is mostly true
721 /// for legal types except for some special cases. For example, on X86
722 /// targets without SSE2 f64 load / store are done with fldl / fstpl which
723 /// also does type conversion.
724 virtual bool isLegalMemOpType(MVT VT) const {
725 return VT.isInteger();
728 /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
729 /// to implement llvm.setjmp.
730 bool usesUnderscoreSetJmp() const {
731 return UseUnderscoreSetJmp;
734 /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
735 /// to implement llvm.longjmp.
736 bool usesUnderscoreLongJmp() const {
737 return UseUnderscoreLongJmp;
740 /// supportJumpTables - return whether the target can generate code for
742 bool supportJumpTables() const {
743 return SupportJumpTables;
746 /// getMinimumJumpTableEntries - return the minimum number of case blocks
747 /// required to use a jump table rather than an if sequence.
748 int getMinimumJumpTableEntries() const {
749 return MinimumJumpTableEntries;
752 /// getStackPointerRegisterToSaveRestore - If a physical register, this
753 /// specifies the register that llvm.stacksave/llvm.stackrestore should save
755 unsigned getStackPointerRegisterToSaveRestore() const {
756 return StackPointerRegisterToSaveRestore;
759 /// getExceptionPointerRegister - If a physical register, this returns
760 /// the register that receives the exception address on entry to a landing
762 unsigned getExceptionPointerRegister() const {
763 return ExceptionPointerRegister;
766 /// getExceptionSelectorRegister - If a physical register, this returns
767 /// the register that receives the exception typeid on entry to a landing
769 unsigned getExceptionSelectorRegister() const {
770 return ExceptionSelectorRegister;
773 /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
774 /// set, the default is 200)
775 unsigned getJumpBufSize() const {
779 /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
780 /// (if never set, the default is 0)
781 unsigned getJumpBufAlignment() const {
782 return JumpBufAlignment;
785 /// getMinStackArgumentAlignment - return the minimum stack alignment of an
787 unsigned getMinStackArgumentAlignment() const {
788 return MinStackArgumentAlignment;
791 /// getMinFunctionAlignment - return the minimum function alignment.
793 unsigned getMinFunctionAlignment() const {
794 return MinFunctionAlignment;
797 /// getPrefFunctionAlignment - return the preferred function alignment.
799 unsigned getPrefFunctionAlignment() const {
800 return PrefFunctionAlignment;
803 /// getPrefLoopAlignment - return the preferred loop alignment.
805 unsigned getPrefLoopAlignment() const {
806 return PrefLoopAlignment;
809 /// getShouldFoldAtomicFences - return whether the combiner should fold
810 /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
812 bool getShouldFoldAtomicFences() const {
813 return ShouldFoldAtomicFences;
816 /// getInsertFencesFor - return whether the DAG builder should automatically
817 /// insert fences and reduce ordering for atomics.
819 bool getInsertFencesForAtomic() const {
820 return InsertFencesForAtomic;
823 /// getPreIndexedAddressParts - returns true, and sets the base pointer,
824 /// offset pointer, and addressing mode by reference, if the node's address
825 /// can be legally represented as a pre-indexed load / store address.
826 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
828 ISD::MemIndexedMode &/*AM*/,
829 SelectionDAG &/*DAG*/) const {
833 /// getPostIndexedAddressParts - returns true, and sets the base pointer,
834 /// offset pointer, and addressing mode by reference, if this node can be
835 /// combined with a load / store to form a post-indexed load / store.
836 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
837 SDValue &/*Base*/, SDValue &/*Offset*/,
838 ISD::MemIndexedMode &/*AM*/,
839 SelectionDAG &/*DAG*/) const {
843 /// getJumpTableEncoding - Return the entry encoding for a jump table in the
844 /// current function. The returned value is a member of the
845 /// MachineJumpTableInfo::JTEntryKind enum.
846 virtual unsigned getJumpTableEncoding() const;
848 virtual const MCExpr *
849 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
850 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
851 MCContext &/*Ctx*/) const {
852 llvm_unreachable("Need to implement this hook if target has custom JTIs");
855 /// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
857 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
858 SelectionDAG &DAG) const;
860 /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
861 /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
863 virtual const MCExpr *
864 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
865 unsigned JTI, MCContext &Ctx) const;
867 /// isOffsetFoldingLegal - Return true if folding a constant offset
868 /// with the given GlobalAddress is legal. It is frequently not legal in
869 /// PIC relocation models.
870 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
872 /// getStackCookieLocation - Return true if the target stores stack
873 /// protector cookies at a fixed offset in some non-standard address
874 /// space, and populates the address space and offset as
876 virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
877 unsigned &/*Offset*/) const {
881 /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
882 /// used for loads / stores from the global.
883 virtual unsigned getMaximalGlobalOffset() const {
887 //===--------------------------------------------------------------------===//
888 // TargetLowering Optimization Methods
891 /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
892 /// SDValues for returning information from TargetLowering to its clients
893 /// that want to combine
894 struct TargetLoweringOpt {
901 explicit TargetLoweringOpt(SelectionDAG &InDAG,
903 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
905 bool LegalTypes() const { return LegalTys; }
906 bool LegalOperations() const { return LegalOps; }
908 bool CombineTo(SDValue O, SDValue N) {
914 /// ShrinkDemandedConstant - Check to see if the specified operand of the
915 /// specified instruction is a constant integer. If so, check to see if
916 /// there are any bits set in the constant that are not demanded. If so,
917 /// shrink the constant and return true.
918 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
920 /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
921 /// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening
922 /// cast, but it could be generalized for targets with other types of
923 /// implicit widening casts.
924 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
928 /// SimplifyDemandedBits - Look at Op. At this point, we know that only the
929 /// DemandedMask bits of the result of Op are ever used downstream. If we can
930 /// use this information to simplify Op, create a new simplified DAG node and
931 /// return true, returning the original and new nodes in Old and New.
932 /// Otherwise, analyze the expression and return a mask of KnownOne and
933 /// KnownZero bits for the expression (used to simplify the caller).
934 /// The KnownZero/One bits may only be accurate for those bits in the
936 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
937 APInt &KnownZero, APInt &KnownOne,
938 TargetLoweringOpt &TLO, unsigned Depth = 0) const;
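/// A target DAG combine might use this roughly as follows (a sketch; Op and
/// DCI come from a PerformDAGCombine override, and demanding the low 16 bits
/// of a 32-bit value is purely illustrative):
/// \code
///   APInt KnownZero, KnownOne;
///   TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
///                         !DCI.isBeforeLegalizeOps());
///   if (SimplifyDemandedBits(Op, APInt::getLowBitsSet(32, 16),
///                            KnownZero, KnownOne, TLO))
///     DCI.CommitTargetLoweringOpt(TLO);
/// \endcode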
940 /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
941 /// Mask are known to be either zero or one and return them in the
942 /// KnownZero/KnownOne bitsets.
943 virtual void computeMaskedBitsForTargetNode(const SDValue Op,
946 const SelectionDAG &DAG,
947 unsigned Depth = 0) const;
949 /// ComputeNumSignBitsForTargetNode - This method can be implemented by
950 /// targets that want to expose additional information about sign bits to the
952 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
953 unsigned Depth = 0) const;
955 struct DAGCombinerInfo {
956 void *DC; // The DAG Combiner object.
958 bool BeforeLegalizeOps;
959 bool CalledByLegalizer;
963 DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
964 : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
965 CalledByLegalizer(cl), DAG(dag) {}
967 bool isBeforeLegalize() const { return BeforeLegalize; }
968 bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
969 bool isCalledByLegalizer() const { return CalledByLegalizer; }
971 void AddToWorklist(SDNode *N);
972 void RemoveFromWorklist(SDNode *N);
973 SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
975 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
976 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
978 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
981 /// SimplifySetCC - Try to simplify a setcc built with the specified operands
982 /// and cc. If it is unable to simplify it, return a null SDValue.
983 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
984 ISD::CondCode Cond, bool foldBooleans,
985 DAGCombinerInfo &DCI, DebugLoc dl) const;
987 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
988 /// node is a GlobalAddress + offset.
990 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
992 /// PerformDAGCombine - This method will be invoked for all target nodes and
993 /// for any target-independent nodes that the target has registered with
996 /// The semantics are as follows:
998 /// SDValue.Val == 0 - No change was made
999 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
1000 /// otherwise - N should be replaced by the returned Operand.
1002 /// In addition, methods provided by DAGCombinerInfo may be used to perform
1003 /// more complex transformations.
1005 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
1007 /// isTypeDesirableForOp - Return true if the target has native support for
1008 /// the specified value type and it is 'desirable' to use the type for the
1009 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
1010 /// instruction encodings are longer and some i16 instructions are slow.
1011 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
1012 // By default, assume all legal types are desirable.
1013 return isTypeLegal(VT);
1016 /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
1017 /// the dag combiner to transform a floating point op of the specified opcode
1018 /// to an equivalent integer op. e.g. f32 load -> i32 load can be profitable on ARM.
1019 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
1024 /// IsDesirableToPromoteOp - This method queries the target whether it is
1025 /// beneficial for the dag combiner to promote the specified node. If true, it
1026 /// should return the desired promotion type by reference.
1027 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
1031 //===--------------------------------------------------------------------===//
1032 // TargetLowering Configuration Methods - These methods should be invoked by
1033 // the derived class constructor to configure this object for the target.
1037 /// setBooleanContents - Specify how the target extends the result of a
1038 /// boolean value from i1 to a wider type. See getBooleanContents.
1039 void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
1040 /// setBooleanVectorContents - Specify how the target extends the result
1041 /// of a vector boolean value from a vector of i1 to a wider type. See
1042 /// getBooleanContents.
1043 void setBooleanVectorContents(BooleanContent Ty) {
1044 BooleanVectorContents = Ty;
1047 /// setSchedulingPreference - Specify the target scheduling preference.
1048 void setSchedulingPreference(Sched::Preference Pref) {
1049 SchedPreferenceInfo = Pref;
1052 /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
1053 /// use _setjmp to implement llvm.setjmp or the non _ version.
1054 /// Defaults to false.
1055 void setUseUnderscoreSetJmp(bool Val) {
1056 UseUnderscoreSetJmp = Val;
1059 /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
1060 /// use _longjmp to implement llvm.longjmp or the non _ version.
1061 /// Defaults to false.
1062 void setUseUnderscoreLongJmp(bool Val) {
1063 UseUnderscoreLongJmp = Val;
1066 /// setSupportJumpTables - Indicate whether the target can generate code for
1068 void setSupportJumpTables(bool Val) {
1069 SupportJumpTables = Val;
1072 /// setMinimumJumpTableEntries - Indicate the minimum number of case blocks
1073 /// needed to generate a jump table rather than an if sequence.
1074 void setMinimumJumpTableEntries(int Val) {
1075 MinimumJumpTableEntries = Val;
1078 /// setStackPointerRegisterToSaveRestore - If set to a physical register, this
1079 /// specifies the register that llvm.stacksave/llvm.stackrestore should save
1081 void setStackPointerRegisterToSaveRestore(unsigned R) {
1082 StackPointerRegisterToSaveRestore = R;
1085 /// setExceptionPointerRegister - If set to a physical register, this sets
1086 /// the register that receives the exception address on entry to a landing
1088 void setExceptionPointerRegister(unsigned R) {
1089 ExceptionPointerRegister = R;
1092 /// setExceptionSelectorRegister - If set to a physical register, this sets
1093 /// the register that receives the exception typeid on entry to a landing
1095 void setExceptionSelectorRegister(unsigned R) {
1096 ExceptionSelectorRegister = R;
1099 /// SelectIsExpensive - Tells the code generator not to expand operations
1100 /// into sequences that use the select operations if possible.
1101 void setSelectIsExpensive(bool isExpensive = true) {
1102 SelectIsExpensive = isExpensive;
1105 /// JumpIsExpensive - Tells the code generator not to expand a sequence of
1106 /// operations into separate sequences that increase the amount of
1108 void setJumpIsExpensive(bool isExpensive = true) {
1109 JumpIsExpensive = isExpensive;
1112 /// setIntDivIsCheap - Tells the code generator whether integer divide is
1113 /// cheap; when it is not, the divide should be replaced where possible by an
1114 /// alternate sequence of instructions not containing an integer divide.
1115 void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
1117 /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
1118 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1119 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1122 /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
1123 /// srl/add/sra for a signed divide by power of two, and let the target handle
1125 void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }
1127 /// addRegisterClass - Add the specified register class as an available
1128 /// regclass for the specified value type. This indicates the selector can
1129 /// handle values of that class natively.
1130 void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
1131 assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
1132 AvailableRegClasses.push_back(std::make_pair(VT, RC));
1133 RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
1136 /// findRepresentativeClass - Return the largest legal super-reg register class
1137 /// of the register class for the specified type and its associated "cost".
1138 virtual std::pair<const TargetRegisterClass*, uint8_t>
1139 findRepresentativeClass(EVT VT) const;
1141 /// computeRegisterProperties - Once all of the register classes are added,
1142 /// this allows us to compute derived properties we expose.
1143 void computeRegisterProperties();
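/// Typical use in a target's TargetLowering constructor (a sketch; the
/// register class names are hypothetical placeholders):
/// \code
///   addRegisterClass(MVT::i32, &MyTarget::IntRegsRegClass); // hypothetical
///   addRegisterClass(MVT::f64, &MyTarget::FPRegsRegClass);  // hypothetical
///   computeRegisterProperties();
/// \endcode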
1145 /// setOperationAction - Indicate that the specified operation does not work
1146 /// with the specified type and indicate what to do about it.
1147 void setOperationAction(unsigned Op, MVT VT,
1148 LegalizeAction Action) {
1149 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1150 OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
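/// For example, a target constructor might contain a block of these calls
/// (a sketch; the particular actions are illustrative, not from any real
/// target):
/// \code
///   setOperationAction(ISD::SDIV,   MVT::i32, Expand);
///   setOperationAction(ISD::FSIN,   MVT::f32, Expand);
///   setOperationAction(ISD::SELECT, MVT::f64, Custom);
/// \endcode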
1153 /// setLoadExtAction - Indicate that the specified load with extension does
1154 /// not work with the specified type and indicate what to do about it.
1155 void setLoadExtAction(unsigned ExtType, MVT VT,
1156 LegalizeAction Action) {
1157 assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
1158 "Table isn't big enough!");
1159 LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
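/// For example, a target without a sign-extending i1 load would typically
/// write (a sketch of a common pattern):
/// \code
///   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
/// \endcode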
1162 /// setTruncStoreAction - Indicate that the specified truncating store does
1163 /// not work with the specified type and indicate what to do about it.
1164 void setTruncStoreAction(MVT ValVT, MVT MemVT,
1165 LegalizeAction Action) {
1166 assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
1167 "Table isn't big enough!");
1168 TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
1171 /// setIndexedLoadAction - Indicate that the specified indexed load does or
1172 /// does not work with the specified type and indicate what to do about
1173 /// it. NOTE: All indexed mode loads are initialized to Expand in
1174 /// TargetLowering.cpp
1175 void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1176 LegalizeAction Action) {
1177 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1178 (unsigned)Action < 0xf && "Table isn't big enough!");
1179 // Load action are kept in the upper half.
1180 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1181 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1184 /// setIndexedStoreAction - Indicate that the specified indexed store does or
1185 /// does not work with the specified type and indicate what to do about
1186 /// it. NOTE: All indexed mode stores are initialized to Expand in
1187 /// TargetLowering.cpp
1188 void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1189 LegalizeAction Action) {
1190 assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
1191 (unsigned)Action < 0xf && "Table isn't big enough!");
1192 // Store action are kept in the lower half.
1193 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1194 IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1197 /// setCondCodeAction - Indicate that the specified condition code is or isn't
1198 /// supported on the target and indicate what to do about it.
1199 void setCondCodeAction(ISD::CondCode CC, MVT VT,
1200 LegalizeAction Action) {
1201 assert(VT < MVT::LAST_VALUETYPE &&
1202 (unsigned)CC < array_lengthof(CondCodeActions) &&
1203 "Table isn't big enough!");
1204 /// The lower 5 bits of the SimpleTy select the Nth 2-bit field within a
1205 /// 64-bit value, and the remaining upper bits index into the second
1206 /// dimension of the array to select which 64-bit value to use.
1207 CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
1208 &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
1209 CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
1210 |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
1213 /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
1214 /// promotion code defaults to trying a larger integer/fp until it can find
1215 /// one that works. If that default is insufficient, this method can be used
1216 /// by the target to override the default.
1217 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1218 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1221 /// setTargetDAGCombine - Targets should invoke this method for each target
1222 /// independent node that they want to provide a custom DAG combiner for by
1223 /// implementing the PerformDAGCombine virtual method.
1224 void setTargetDAGCombine(ISD::NodeType NT) {
1225 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1226 TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
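/// For example, a target interested in combining extension nodes would call
/// (a sketch):
/// \code
///   setTargetDAGCombine(ISD::SIGN_EXTEND);
///   setTargetDAGCombine(ISD::ZERO_EXTEND);
/// \endcode
/// and then handle those opcodes in its PerformDAGCombine override.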
1229 /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
1230 /// bytes); default is 200
1231 void setJumpBufSize(unsigned Size) {
1235 /// setJumpBufAlignment - Set the target's required jmp_buf buffer
1236 /// alignment (in bytes); default is 0
1237 void setJumpBufAlignment(unsigned Align) {
1238 JumpBufAlignment = Align;
1241 /// setMinFunctionAlignment - Set the target's minimum function alignment (in
1243 void setMinFunctionAlignment(unsigned Align) {
1244 MinFunctionAlignment = Align;
1247 /// setPrefFunctionAlignment - Set the target's preferred function alignment.
1248 /// This should be set if there is a performance benefit to
1249 /// higher-than-minimum alignment (in log2(bytes))
1250 void setPrefFunctionAlignment(unsigned Align) {
1251 PrefFunctionAlignment = Align;
1254 /// setPrefLoopAlignment - Set the target's preferred loop alignment. The
1255 /// default of zero means the target does not care about loop alignment.
1256 /// The alignment is specified in log2(bytes).
1257 void setPrefLoopAlignment(unsigned Align) {
1258 PrefLoopAlignment = Align;
1261 /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
1262 /// argument (in log2(bytes)).
1263 void setMinStackArgumentAlignment(unsigned Align) {
1264 MinStackArgumentAlignment = Align;
1267 /// setShouldFoldAtomicFences - Set if the target's implementation of the
1268 /// atomic operation intrinsics includes locking. Default is false.
1269 void setShouldFoldAtomicFences(bool fold) {
1270 ShouldFoldAtomicFences = fold;
1273 /// setInsertFencesForAtomic - Set if the DAG builder should
1274 /// automatically insert fences and reduce the order of atomic memory
1275 /// operations to Monotonic.
1276 void setInsertFencesForAtomic(bool fence) {
1277 InsertFencesForAtomic = fence;
1281 //===--------------------------------------------------------------------===//
1282 // Lowering methods - These methods must be implemented by targets so that
1283 // the SelectionDAGBuilder code knows how to lower these.
1286 /// LowerFormalArguments - This hook must be implemented to lower the
1287 /// incoming (formal) arguments, described by the Ins array, into the
1288 /// specified DAG. The implementation should fill in the InVals array
1289 /// with legal-type argument values, and return the resulting token
1293 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1295 const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
1296 DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
1297 SmallVectorImpl<SDValue> &/*InVals*/) const {
1298 llvm_unreachable("Not Implemented");
1301 struct ArgListEntry {
1312 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
1313 isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
1315 typedef std::vector<ArgListEntry> ArgListTy;
1317 /// CallLoweringInfo - This structure contains all information that is
1318 /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the
1319 /// SelectionDAG builder needs to lower a call, and targets will see this
1320 /// struct in their LowerCall implementation.
1321 struct CallLoweringInfo {
1328 bool DoesNotReturn : 1;
1329 bool IsReturnValueUsed : 1;
1331 // IsTailCall should be modified by implementations of
1332 // TargetLowering::LowerCall that perform tail call conversions.
1335 unsigned NumFixedArgs;
1336 CallingConv::ID CallConv;
1341 ImmutableCallSite *CS;
1342 SmallVector<ISD::OutputArg, 32> Outs;
1343 SmallVector<SDValue, 32> OutVals;
1344 SmallVector<ISD::InputArg, 32> Ins;
1347 /// CallLoweringInfo - Constructs a call lowering context based on the
1348 /// ImmutableCallSite \p cs.
1349 CallLoweringInfo(SDValue chain, Type *retTy,
1350 FunctionType *FTy, bool isTailCall, SDValue callee,
1351 ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
1352 ImmutableCallSite &cs)
1353 : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
1354 RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
1355 IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
1356 DoesNotReturn(cs.doesNotReturn()),
1357 IsReturnValueUsed(!cs.getInstruction()->use_empty()),
1358 IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
1359 CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
1362 /// CallLoweringInfo - Constructs a call lowering context based on the
1363 /// provided call information.
1364 CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
1365 bool isVarArg, bool isInReg, unsigned numFixedArgs,
1366 CallingConv::ID callConv, bool isTailCall,
1367 bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
1368 ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
1369 : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
1370 IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
1371 IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
1372 NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
1373 Args(args), DAG(dag), DL(dl), CS(NULL) {}
1376 /// LowerCallTo - This function lowers an abstract call to a function into an
1377 /// actual call. This returns a pair of operands. The first element is the
1378 /// return value for the function (if RetTy is not VoidTy). The second
1379 /// element is the outgoing token chain. It calls LowerCall to do the actual
1381 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
1383 /// LowerCall - This hook must be implemented to lower calls into the
1384 /// specified DAG. The outgoing arguments to the call are described
1385 /// by the Outs array, and the values to be returned by the call are
1386 /// described by the Ins array. The implementation should fill in the
1387 /// InVals array with legal-type return values from the call, and return
1388 /// the resulting token chain value.
1390 LowerCall(CallLoweringInfo &/*CLI*/,
1391 SmallVectorImpl<SDValue> &/*InVals*/) const {
1392 llvm_unreachable("Not Implemented");
1395 /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
1396 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
1398 /// CanLowerReturn - This hook should be implemented to check whether the
1399 /// return values described by the Outs array can fit into the return
1400 /// registers. If false is returned, an sret-demotion is performed.
1402 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
1403 MachineFunction &/*MF*/, bool /*isVarArg*/,
1404 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
1405 LLVMContext &/*Context*/) const
1407 // Return true by default to get preexisting behavior.
1411 /// LowerReturn - This hook must be implemented to lower outgoing
1412 /// return values, described by the Outs array, into the specified
1413 /// DAG. The implementation should return the resulting token chain
1417 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1419 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
1420 const SmallVectorImpl<SDValue> &/*OutVals*/,
1421 DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
1422 llvm_unreachable("Not Implemented");
1425 /// isUsedByReturnOnly - Return true if the result of the specified node is used
1426 /// by a return node only. It also computes and returns the input chain for the tail call.
1428 /// This is used to determine whether it is possible
1429 /// to codegen a libcall as tail call at legalization time.
1430 virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
1434 /// mayBeEmittedAsTailCall - Return true if the target may be able to emit the
1435 /// call instruction as a tail call. This is used by optimization passes to
1436 /// determine if it's profitable to duplicate return instructions to enable
1437 /// tailcall optimization.
1438 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
1442 /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
1443 /// sign extend a zeroext/signext integer argument or return value.
1444 /// FIXME: Most C calling conventions require the return type to be promoted,
1445 /// but this is not true all the time, e.g. i1 on x86-64. It is also not
1446 /// necessary for non-C calling conventions. The frontend should handle this
1447 /// and include all of the necessary information.
1448 virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
1449 ISD::NodeType /*ExtendKind*/) const {
1450 EVT MinVT = getRegisterType(Context, MVT::i32);
1451 return VT.bitsLT(MinVT) ? MinVT : VT;
1454 /// LowerOperationWrapper - This callback is invoked by the type legalizer
1455 /// to legalize nodes with an illegal operand type but legal result types.
1456 /// It replaces the LowerOperation callback in the type Legalizer.
1457 /// The reason we can not do away with LowerOperation entirely is that
1458 /// LegalizeDAG isn't yet ready to use this callback.
1459 /// TODO: Consider merging with ReplaceNodeResults.
1461 /// The target places new result values for the node in Results (their number
1462 /// and types must exactly match those of the original return values of
1463 /// the node), or leaves Results empty, which indicates that the node is not
1464 /// to be custom lowered after all.
1465 /// The default implementation calls LowerOperation.
1466 virtual void LowerOperationWrapper(SDNode *N,
1467 SmallVectorImpl<SDValue> &Results,
1468 SelectionDAG &DAG) const;
1470 /// LowerOperation - This callback is invoked for operations that are
1471 /// unsupported by the target, which are registered to use 'custom' lowering,
1472 /// and whose defined values are all legal.
1473 /// If the target has no operations that require custom lowering, it need not
1474 /// implement this. The default implementation of this aborts.
1475 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
1477 /// ReplaceNodeResults - This callback is invoked when a node result type is
1478 /// illegal for the target, and the operation was registered to use 'custom'
1479 /// lowering for that result type. The target places new result values for
1480 /// the node in Results (their number and types must exactly match those of
1481 /// the original return values of the node), or leaves Results empty, which
1482 /// indicates that the node is not to be custom lowered after all.
1484 /// If the target has no operations that require custom lowering, it need not
1485 /// implement this. The default implementation aborts.
1486 virtual void ReplaceNodeResults(SDNode * /*N*/,
1487 SmallVectorImpl<SDValue> &/*Results*/,
1488 SelectionDAG &/*DAG*/) const {
1489 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
1492 /// getTargetNodeName() - This method returns the name of a target specific DAG node.
1494 virtual const char *getTargetNodeName(unsigned Opcode) const;
1496 /// createFastISel - This method returns a target specific FastISel object,
1497 /// or null if the target does not support "fast" ISel.
1498 virtual FastISel *createFastISel(FunctionLoweringInfo &,
1499 const TargetLibraryInfo *) const {
1503 //===--------------------------------------------------------------------===//
1504 // Inline Asm Support hooks
1507 /// ExpandInlineAsm - This hook allows the target to expand an inline asm
1508 /// call to be explicit llvm code if it wants to. This is useful for
1509 /// turning simple inline asms into LLVM intrinsics, which gives the
1510 /// compiler more information about the behavior of the code.
1511 virtual bool ExpandInlineAsm(CallInst *) const {
1515 enum ConstraintType {
1516 C_Register, // Constraint represents specific register(s).
1517 C_RegisterClass, // Constraint represents any of register(s) in class.
1518 C_Memory, // Memory constraint.
1519 C_Other, // Something else.
1520 C_Unknown // Unsupported constraint.
1523 enum ConstraintWeight {
1525 CW_Invalid = -1, // No match.
1526 CW_Okay = 0, // Acceptable.
1527 CW_Good = 1, // Good weight.
1528 CW_Better = 2, // Better weight.
1529 CW_Best = 3, // Best weight.
1531 // Well-known weights.
1532 CW_SpecificReg = CW_Okay, // Specific register operands.
1533 CW_Register = CW_Good, // Register operands.
1534 CW_Memory = CW_Better, // Memory operands.
1535 CW_Constant = CW_Best, // Constant operand.
1536 CW_Default = CW_Okay // Default or don't know type.
1539 /// AsmOperandInfo - This contains information for each constraint that we are lowering.
1541 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
1542 /// ConstraintCode - This contains the actual string for the code, like "m".
1543 /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
1544 /// most closely matches the operand.
1545 std::string ConstraintCode;
1547 /// ConstraintType - Information about the constraint code, e.g. Register,
1548 /// RegisterClass, Memory, Other, Unknown.
1549 TargetLowering::ConstraintType ConstraintType;
1551 /// CallOperandVal - If this is the result output operand or a
1552 /// clobber, this is null, otherwise it is the incoming operand to the
1553 /// CallInst. This gets modified as the asm is processed.
1554 Value *CallOperandVal;
1556 /// ConstraintVT - The ValueType for the operand value.
1559 /// isMatchingInputConstraint - Return true if this is an input operand that
1560 /// is a matching constraint like "4".
1561 bool isMatchingInputConstraint() const;
1563 /// getMatchedOperand - If this is an input matching constraint, this method
1564 /// returns the output operand it matches.
1565 unsigned getMatchedOperand() const;
1567 /// Copy constructor for copying from an AsmOperandInfo.
1568 AsmOperandInfo(const AsmOperandInfo &info)
1569 : InlineAsm::ConstraintInfo(info),
1570 ConstraintCode(info.ConstraintCode),
1571 ConstraintType(info.ConstraintType),
1572 CallOperandVal(info.CallOperandVal),
1573 ConstraintVT(info.ConstraintVT) {
1576 /// Constructor for creating an AsmOperandInfo from an InlineAsm::ConstraintInfo.
1577 AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
1578 : InlineAsm::ConstraintInfo(info),
1579 ConstraintType(TargetLowering::C_Unknown),
1580 CallOperandVal(0), ConstraintVT(MVT::Other) {
1584 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
1586 /// ParseConstraints - Split up the constraint string from the inline
1587 /// assembly value into the specific constraints and their prefixes,
1588 /// and also tie in the associated operand values.
1589 /// If this returns an empty vector, and if the constraint string itself
1590 /// isn't empty, there was an error parsing.
1591 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
1593 /// Examine constraint type and operand type and determine a weight value.
1594 /// The operand object must already have been set up with the operand type.
1595 virtual ConstraintWeight getMultipleConstraintMatchWeight(
1596 AsmOperandInfo &info, int maIndex) const;
1598 /// Examine constraint string and operand type and determine a weight value.
1599 /// The operand object must already have been set up with the operand type.
1600 virtual ConstraintWeight getSingleConstraintMatchWeight(
1601 AsmOperandInfo &info, const char *constraint) const;
1603 /// ComputeConstraintToUse - Determines the constraint code and constraint
1604 /// type to use for the specific AsmOperandInfo, setting
1605 /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
1606 /// being passed in is available, it can be passed in as Op, otherwise an
1607 /// empty SDValue can be passed.
1608 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
1610 SelectionDAG *DAG = 0) const;
1612 /// getConstraintType - Given a constraint, return the type of constraint it
1613 /// is for this target.
1614 virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1616 /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
1617 /// {edx}), return the register number and the register class for the register.
1620 /// Given a register class constraint, like 'r', if this corresponds directly
1621 /// to an LLVM register class, return a register number of 0 and the matching register class.
1624 /// This should only be used for C_Register constraints. On error,
1625 /// this returns a register number of 0 and a null register class pointer.
1626 virtual std::pair<unsigned, const TargetRegisterClass*>
1627 getRegForInlineAsmConstraint(const std::string &Constraint,
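// A hypothetical override sketch; MyTargetLowering, MyTarget, and its
// GPRRegClass are illustrative names, not real classes.
//
//   std::pair<unsigned, const TargetRegisterClass*>
//   MyTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
//                                                  EVT VT) const {
//     if (Constraint.size() == 1 && Constraint[0] == 'r')
//       return std::make_pair(0U, &MyTarget::GPRRegClass);
//     // Defer named-register constraints such as "{edx}" to the base class.
//     return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
//   }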
1630 /// LowerXConstraint - try to replace an X constraint, which matches anything,
1631 /// with another that has more specific requirements based on the type of the
1632 /// corresponding operand. This returns null if there is no replacement to make.
1634 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
1636 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
1637 /// vector. If it is invalid, don't add anything to Ops.
1638 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
1639 std::vector<SDValue> &Ops,
1640 SelectionDAG &DAG) const;
1642 //===--------------------------------------------------------------------===//
1643 // Instruction Emitting Hooks
1646 // EmitInstrWithCustomInserter - This method should be implemented by targets
1647 // that mark instructions with the 'usesCustomInserter' flag. These
1648 // instructions are special in various ways, which require special support to
1649 // insert. The specified MachineInstr is created but not inserted into any
1650 // basic blocks, and this method is called to expand it into a sequence of
1651 // instructions, potentially also creating new basic blocks and control flow.
1652 virtual MachineBasicBlock *
1653 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
1655 /// AdjustInstrPostInstrSelection - This method should be implemented by
1656 /// targets that mark instructions with the 'hasPostISelHook' flag. These
1657 /// instructions must be adjusted after instruction selection by target hooks.
1658 /// e.g. To fill in optional defs for ARM 's' setting instructions.
1660 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
1662 //===--------------------------------------------------------------------===//
1663 // Addressing mode description hooks (used by LSR etc).
1666 /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
1667 /// same BB as Load/Store instructions reading the address. This allows as
1668 /// much computation as possible to be done in the address mode for that
1669 /// operand. This hook also lets targets indicate when this should be done
1670 /// for intrinsics that load or store.
1671 virtual bool GetAddrModeArguments(IntrinsicInst *I,
1672 SmallVectorImpl<Value*> &Ops,
1673 Type *&AccessTy) const {
1677 /// isLegalAddressingMode - Return true if the addressing mode represented by
1678 /// AM is legal for this target, for a load/store of the specified type.
1679 /// The type may be VoidTy, in which case only return true if the addressing
1680 /// mode is legal for a load/store of any legal type.
1681 /// TODO: Handle pre/postinc as well.
1682 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
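// A hypothetical override sketch for a target that only supports
// "[base register + unsigned 12-bit offset]" addressing; MyTargetLowering is
// an illustrative name.
//
//   bool MyTargetLowering::isLegalAddressingMode(const AddrMode &AM,
//                                                Type *Ty) const {
//     if (AM.BaseGV || AM.Scale != 0)   // no global bases, no scaled index
//       return false;
//     return AM.BaseOffs >= 0 && AM.BaseOffs < (1 << 12);
//   }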
1684 /// isLegalICmpImmediate - Return true if the specified immediate is a legal
1685 /// icmp immediate, that is, the target has icmp instructions which can compare
1686 /// a register against the immediate without having to materialize the
1687 /// immediate into a register.
1688 virtual bool isLegalICmpImmediate(int64_t) const {
1692 /// isLegalAddImmediate - Return true if the specified immediate is a legal
1693 /// add immediate, that is, the target has add instructions which can add
1694 /// a register with the immediate without having to materialize the
1695 /// immediate into a register.
1696 virtual bool isLegalAddImmediate(int64_t) const {
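// A hypothetical sketch for both immediate hooks: a target whose compare and
// add instructions accept 16-bit signed immediates could simply range-check.
// MyTargetLowering is an illustrative name.
//
//   bool MyTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
//     return isInt<16>(Imm);
//   }
//   bool MyTargetLowering::isLegalAddImmediate(int64_t Imm) const {
//     return isInt<16>(Imm);
//   }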
1700 /// isTruncateFree - Return true if it's free to truncate a value of
1701 /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
1702 /// register EAX to i16 by referencing its sub-register AX.
1703 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1707 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1711 /// isZExtFree - Return true if any actual instruction that defines a
1712 /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
1713 /// register. This does not necessarily include registers defined in
1714 /// unknown ways, such as incoming arguments, or copies from unknown
1715 /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
1716 /// does not necessarily apply to truncate instructions. e.g. on x86-64,
1717 /// all instructions that define 32-bit values implicitly zero-extend the
1718 /// result out to 64 bits.
1719 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1723 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1727 /// isZExtFree - Return true if zero-extending the specific node Val to type
1728 /// VT2 is free (either because it's implicitly zero-extended such as ARM
1729 /// ldrb / ldrh or because it's folded such as X86 zero-extending loads).
1730 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1731 return isZExtFree(Val.getValueType(), VT2);
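// A hypothetical sketch: on a 64-bit target whose 32-bit operations clear the
// upper half of the destination register, i32 -> i64 zero extension is free.
// MyTargetLowering is an illustrative name.
//
//   bool MyTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
//     return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64);
//   }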
1734 /// isFNegFree - Return true if an fneg operation is free to the point where
1735 /// it is never worthwhile to replace it with a bitwise operation.
1736 virtual bool isFNegFree(EVT) const {
1740 /// isFAbsFree - Return true if an fabs operation is free to the point where
1741 /// it is never worthwhile to replace it with a bitwise operation.
1742 virtual bool isFAbsFree(EVT) const {
1746 /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
1747 /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
1748 /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
1749 /// is expanded to mul + add.
1750 virtual bool isFMAFasterThanMulAndAdd(EVT) const {
1754 /// isNarrowingProfitable - Return true if it's profitable to narrow
1755 /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
1756 /// from i32 to i8 but not from i32 to i16.
1757 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1761 //===--------------------------------------------------------------------===//
1762 // Div utility functions
1764 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
1765 SelectionDAG &DAG) const;
1766 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1767 std::vector<SDNode*> *Created) const;
1768 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1769 std::vector<SDNode*> *Created) const;
1772 //===--------------------------------------------------------------------===//
1773 // Runtime Library hooks
1776 /// setLibcallName - Rename the default libcall routine name for the specified libcall.
1778 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1779 LibcallRoutineNames[Call] = Name;
1782 /// getLibcallName - Get the libcall routine name for the specified libcall.
1784 const char *getLibcallName(RTLIB::Libcall Call) const {
1785 return LibcallRoutineNames[Call];
1788 /// setCmpLibcallCC - Override the default CondCode to be used to test the
1789 /// result of the comparison libcall against zero.
1790 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1791 CmpLibcallCCs[Call] = CC;
1794 /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
1795 /// the comparison libcall against zero.
1796 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1797 return CmpLibcallCCs[Call];
1800 /// setLibcallCallingConv - Set the CallingConv that should be used for the
1801 /// specified libcall.
1802 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1803 LibcallCallingConvs[Call] = CC;
1806 /// getLibcallCallingConv - Get the CallingConv that should be used for the
1807 /// specified libcall.
1808 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1809 return LibcallCallingConvs[Call];
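// A hypothetical configuration sketch: a target constructor typically tailors
// the libcall tables with these setters. The routine name "__my_divdi3" is an
// illustrative assumption.
//
//   setLibcallName(RTLIB::SDIV_I64, "__my_divdi3");
//   setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::C);
//   // Compare the f32 equality libcall's result against zero with SETEQ.
//   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);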
1813 const TargetMachine &TM;
1814 const DataLayout *TD;
1815 const TargetLoweringObjectFile &TLOF;
1817 /// PointerTy - The type to use for pointers for the default address space,
1818 /// usually i32 or i64.
1822 /// IsLittleEndian - True if this is a little endian target.
1824 bool IsLittleEndian;
1826 /// SelectIsExpensive - Tells the code generator not to expand operations
1827 /// into sequences that use the select operations if possible.
1828 bool SelectIsExpensive;
1830 /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1831 /// constants into a sequence of muls, adds, and shifts. This is a hack until
1832 /// a real cost model is in place. If we ever optimize for size, this will be
1833 /// set to true unconditionally.
1836 /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
1837 /// remainder instructions. For example, the entry 32 -> 8 tells the
1838 /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
1839 /// integer div/rem when the operands are positive and less than 256.
1840 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
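// For illustration (hypothetical target): if 32-bit divides are microcoded
// but 8-bit divides are fast, the target constructor could add:
//
//   BypassSlowDivWidths[32] = 8;   // try an 8-bit unsigned div/rem first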
1842 /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1843 /// srl/add/sra for a signed divide by power of two, and let the target handle it.
1845 bool Pow2DivIsCheap;
1847 /// JumpIsExpensive - Tells the code generator that it shouldn't generate
1848 /// extra flow control instructions and should attempt to combine flow
1849 /// control instructions via predication.
1850 bool JumpIsExpensive;
1852 /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1853 /// llvm.setjmp. Defaults to false.
1854 bool UseUnderscoreSetJmp;
1856 /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1857 /// llvm.longjmp. Defaults to false.
1858 bool UseUnderscoreLongJmp;
1860 /// SupportJumpTables - Whether the target can generate code for jumptables.
1861 /// If it's not true, then each jumptable must be lowered into if-then-else's.
1862 bool SupportJumpTables;
1864 /// MinimumJumpTableEntries - The minimum number of case destinations required before a jump table is used.
1865 int MinimumJumpTableEntries;
1867 /// BooleanContents - Information about the contents of the high-bits in
1868 /// boolean values held in a type wider than i1. See getBooleanContents.
1869 BooleanContent BooleanContents;
1870 /// BooleanVectorContents - Information about the contents of the high-bits
1871 /// in boolean vector values when the element type is wider than i1. See
1872 /// getBooleanContents.
1873 BooleanContent BooleanVectorContents;
1875 /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1876 /// total cycles or lowest register usage.
1877 Sched::Preference SchedPreferenceInfo;
1879 /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1880 unsigned JumpBufSize;
1882 /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf buffers.
1884 unsigned JumpBufAlignment;
1886 /// MinStackArgumentAlignment - The minimum alignment that any argument
1887 /// on the stack needs to have.
1889 unsigned MinStackArgumentAlignment;
1891 /// MinFunctionAlignment - The minimum function alignment (used when
1892 /// optimizing for size, and to prevent explicitly provided alignment
1893 /// from leading to incorrect code).
1895 unsigned MinFunctionAlignment;
1897 /// PrefFunctionAlignment - The preferred function alignment (used when
1898 /// alignment unspecified and optimizing for speed).
1900 unsigned PrefFunctionAlignment;
1902 /// PrefLoopAlignment - The preferred loop alignment.
1904 unsigned PrefLoopAlignment;
1906 /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
1907 /// be folded into the enclosed atomic intrinsic instruction by the combiner.
1909 bool ShouldFoldAtomicFences;
1911 /// InsertFencesForAtomic - Whether the DAG builder should automatically
1912 /// insert fences and reduce ordering for atomics. (This will be set for
1913 /// most architectures with weak memory ordering.)
1914 bool InsertFencesForAtomic;
1916 /// StackPointerRegisterToSaveRestore - If set to a physical register, this
1917 /// specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
1919 unsigned StackPointerRegisterToSaveRestore;
1921 /// ExceptionPointerRegister - If set to a physical register, this specifies
1922 /// the register that receives the exception address on entry to a landing pad.
1924 unsigned ExceptionPointerRegister;
1926 /// ExceptionSelectorRegister - If set to a physical register, this specifies
1927 /// the register that receives the exception typeid on entry to a landing pad.
1929 unsigned ExceptionSelectorRegister;
1931 /// RegClassForVT - This indicates the default register class to use for
1932 /// each ValueType the target supports natively.
1933 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1934 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1935 EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1937 /// RepRegClassForVT - This indicates the "representative" register class to
1938 /// use for each ValueType the target supports natively. This information is
1939 /// used by the scheduler to track register pressure. By default, the
1940 /// representative register class is the largest legal super-reg register
1941 /// class of the register class of the specified type. e.g. On x86, i8, i16,
1942 /// and i32's representative class would be GR32.
1943 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1945 /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
1946 /// register class for each ValueType. The cost is used by the scheduler to
1947 /// approximate register pressure.
1948 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1950 /// TransformToType - For any value types we are promoting or expanding, this
1951 /// contains the value type that we are changing to. For Expanded types, this
1952 /// contains one step of the expand (e.g. i64 -> i32), even if there are
1953 /// multiple steps required (e.g. i64 -> i16). For types natively supported
1954 /// by the system, this holds the same type (e.g. i32 -> i32).
1955 EVT TransformToType[MVT::LAST_VALUETYPE];
1957 /// OpActions - For each operation and each value type, keep a LegalizeAction
1958 /// that indicates how instruction selection should deal with the operation.
1959 /// Most operations are Legal (aka, supported natively by the target), but
1960 /// operations that are not should be described. Note that operations on
1961 /// non-legal value types are not described here.
1962 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1964 /// LoadExtActions - For each load extension type and each value type,
1965 /// keep a LegalizeAction that indicates how instruction selection should deal
1966 /// with a load of a specific value type and extension type.
1967 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1969 /// TruncStoreActions - For each value type pair keep a LegalizeAction that
1970 /// indicates whether a truncating store of a specific value type and
1971 /// truncating type is legal.
1972 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1974 /// IndexedModeActions - For each indexed mode and each value type,
1975 /// keep a pair of LegalizeAction that indicates how instruction
1976 /// selection should deal with the load / store. The first dimension is the
1977 /// value_type for the reference. The second dimension represents the various
1978 /// modes for load/store.
1979 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1981 /// CondCodeActions - For each condition code (ISD::CondCode) keep a
1982 /// LegalizeAction that indicates how instruction selection should
1983 /// deal with the condition code.
1984 /// Because each CC action takes up 2 bits, we need to have the array size
1985 /// be large enough to fit all of the value types. This can be done by
1986 /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
1987 uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
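// Illustrative note on the packing: with 2 bits per action, the action for
// condition code CC on a simple value type VT is recovered roughly as
//
//   (CondCodeActions[CC][VT.SimpleTy >> 5] >> (2 * (VT.SimpleTy & 0x1F))) & 3
//
// i.e. VT picks one of the 64-bit words and then a 2-bit field within it.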
1989 ValueTypeActionImpl ValueTypeActions;
1993 getTypeConversion(LLVMContext &Context, EVT VT) const {
1994 // If this is a simple type, use the ComputeRegisterProp mechanism.
1995 if (VT.isSimple()) {
1996 assert((unsigned)VT.getSimpleVT().SimpleTy <
1997 array_lengthof(TransformToType));
1998 EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
1999 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());
2002 assert((!(NVT.isSimple() && LA != TypeLegal) ||
2003 ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
2004 && "Promote may not follow Expand or Promote");
2006 if (LA == TypeSplitVector)
2007 NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
2008 VT.getVectorNumElements() / 2);
2009 return LegalizeKind(LA, NVT);
2012 // Handle Extended Scalar Types.
2013 if (!VT.isVector()) {
2014 assert(VT.isInteger() && "Float types must be simple");
2015 unsigned BitSize = VT.getSizeInBits();
2016 // First promote to a power-of-two size, then expand if necessary.
2017 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
2018 EVT NVT = VT.getRoundIntegerType(Context);
2019 assert(NVT != VT && "Unable to round integer VT");
2020 LegalizeKind NextStep = getTypeConversion(Context, NVT);
2021 // Avoid multi-step promotion.
2022 if (NextStep.first == TypePromoteInteger) return NextStep;
2023 // Return rounded integer type.
2024 return LegalizeKind(TypePromoteInteger, NVT);
2027 return LegalizeKind(TypeExpandInteger,
2028 EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
2031 // Handle vector types.
2032 unsigned NumElts = VT.getVectorNumElements();
2033 EVT EltVT = VT.getVectorElementType();
2035 // Vectors with only one element are always scalarized.
2037 return LegalizeKind(TypeScalarizeVector, EltVT);
2039 // Try to widen vector elements until a legal type is found.
2040 if (EltVT.isInteger()) {
2041 // Vectors with a number of elements that is not a power of two are always
2042 // widened, for example <3 x float> -> <4 x float>.
2043 if (!VT.isPow2VectorType()) {
2044 NumElts = (unsigned)NextPowerOf2(NumElts);
2045 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
2046 return LegalizeKind(TypeWidenVector, NVT);
2049 // Examine the element type.
2050 LegalizeKind LK = getTypeConversion(Context, EltVT);
2052 // If type is to be expanded, split the vector.
2053 // <4 x i140> -> <2 x i140>
2054 if (LK.first == TypeExpandInteger)
2055 return LegalizeKind(TypeSplitVector,
2056 EVT::getVectorVT(Context, EltVT, NumElts / 2));
2058 // Promote the integer element types until a legal vector type is found
2059 // or until the element integer type is too big. If a legal type was not
2060 // found, fall back to the usual mechanism of widening/splitting the vector.
2063 // Increase the bitwidth of the element to the next pow-of-two
2064 // (which is greater than 8 bits).
2065 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
2066 ).getRoundIntegerType(Context);
2068 // Stop trying when getting a non-simple element type.
2069 // Note that vector elements may be greater than legal vector element
2070 // types. Example: X86 XMM registers hold 64bit element on 32bit systems.
2071 if (!EltVT.isSimple()) break;
2073 // Build a new vector type and check if it is legal.
2074 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
2075 // Found a legal promoted vector type.
2076 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
2077 return LegalizeKind(TypePromoteInteger,
2078 EVT::getVectorVT(Context, EltVT, NumElts));
2082 // Try to widen the vector until a legal type is found.
2083 // If there is no wider legal type, split the vector.
2085 // Round up to the next power of 2.
2086 NumElts = (unsigned)NextPowerOf2(NumElts);
2088 // If there is no simple vector type with this many elements then there
2089 // cannot be a larger legal vector type. Note that this assumes that
2090 // there are no skipped intermediate vector types in the simple types.
2091 if (!EltVT.isSimple()) break;
2092 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
2093 if (LargerVector == MVT()) break;
2095 // If this type is legal then widen the vector.
2096 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
2097 return LegalizeKind(TypeWidenVector, LargerVector);
2100 // Widen odd vectors to next power of two.
2101 if (!VT.isPow2VectorType()) {
2102 EVT NVT = VT.getPow2VectorType(Context);
2103 return LegalizeKind(TypeWidenVector, NVT);
2106 // Vectors with illegal element types are expanded.
2107 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
2108 return LegalizeKind(TypeSplitVector, NVT);
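// An illustrative trace of the vector cases above, assuming a hypothetical
// 32-bit target whose only legal vector type is v4i32 and on which i64 is
// not a legal scalar type:
//   v1i32 -> (TypeScalarizeVector, i32)    single-element vector
//   v3i32 -> (TypeWidenVector,     v4i32)  non-power-of-two element count
//   v4i16 -> (TypePromoteInteger,  v4i32)  element type promoted until legal
//   v4i64 -> (TypeSplitVector,     v2i64)  element would be expanded, so split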
2112 std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
2114 /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
2115 /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
2116 /// which sets a bit in this array.
2118 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2120 /// PromoteToType - For operations that must be promoted to a specific type,
2121 /// this holds the destination type. This map should be sparse, so don't hold
2124 /// Targets add entries to this map with AddPromotedToType(..), clients access
2125 /// this with getTypeToPromoteTo(..).
2126 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2129 /// LibcallRoutineNames - Stores the name of each libcall.
2131 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
2133 /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
2134 /// of each of the comparison libcall against zero.
2135 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2137 /// LibcallCallingConvs - Stores the CallingConv that should be used for each libcall.
2139 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2142 /// When lowering \@llvm.memset this field specifies the maximum number of
2143 /// store operations that may be substituted for the call to memset. Targets
2144 /// must set this value based on the cost threshold for that target. Targets
2145 /// should assume that the memset will be done using as many of the largest
2146 /// store operations first, followed by smaller ones, if necessary, per
2147 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2148 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2149 /// store. This only applies to setting a constant array of a constant size.
2150 /// @brief Specify maximum number of store instructions per memset call.
2151 unsigned maxStoresPerMemset;
2153 /// Maximum number of store operations that may be substituted for the call
2154 /// to memset, used for functions with OptSize attribute.
2155 unsigned maxStoresPerMemsetOptSize;
2157 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2158 /// store operations that may be substituted for a call to memcpy. Targets
2159 /// must set this value based on the cost threshold for that target. Targets
2160 /// should assume that the memcpy will be done using as many of the largest
2161 /// store operations first, followed by smaller ones, if necessary, per
2162 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2163 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
2164 /// and one 1-byte store. This only applies to copying a constant array of constant size.
2166 /// @brief Specify maximum number of store instructions per memcpy call.
2167 unsigned maxStoresPerMemcpy;
2169 /// Maximum number of store operations that may be substituted for a call
2170 /// to memcpy, used for functions with OptSize attribute.
2171 unsigned maxStoresPerMemcpyOptSize;
2173 /// When lowering \@llvm.memmove this field specifies the maximum number of
2174 /// store instructions that may be substituted for a call to memmove. Targets
2175 /// must set this value based on the cost threshold for that target. Targets
2176 /// should assume that the memmove will be done using as many of the largest
2177 /// store operations first, followed by smaller ones, if necessary, per
2178 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2179 /// with 8-bit alignment would result in nine 1-byte stores. This only
2180 /// applies to copying a constant array of constant size.
2181 /// @brief Specify maximum number of store instructions per memmove call.
2182 unsigned maxStoresPerMemmove;
2184 /// Maximum number of store instructions that may be substituted for a call
2185 /// to memmove, used for functions with OptSize attribute.
2186 unsigned maxStoresPerMemmoveOptSize;
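// A hypothetical tuning sketch: a target constructor typically sets these
// thresholds to match its store throughput, e.g.
//
//   maxStoresPerMemset  = 16;  // expand memset into at most 16 stores
//   maxStoresPerMemcpy  = 8;
//   maxStoresPerMemmove = 4;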
2188 /// This field specifies whether the target can benefit from code placement optimization.
2190 bool benefitFromCodePlacementOpt;
2192 /// predictableSelectIsExpensive - Tells the code generator that select is
2193 /// more expensive than a branch if the branch is usually predicted right.
2194 bool predictableSelectIsExpensive;
2197 /// isLegalRC - Return true if the value types that can be represented by the
2198 /// specified register class are all legal.
2199 bool isLegalRC(const TargetRegisterClass *RC) const;
2202 /// GetReturnInfo - Given an LLVM IR type and return type attributes,
2203 /// compute the return value EVTs and flags, and optionally also
2204 /// the offsets, if the return value is being lowered to memory.
2205 void GetReturnInfo(Type* ReturnType, Attributes attr,
2206 SmallVectorImpl<ISD::OutputArg> &Outs,
2207 const TargetLowering &TLI);
2209 } // end llvm namespace