//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes how to lower LLVM code to machine code.  This has three
// main components:
//
//  1. Which ValueTypes are natively supported by the target.
//  2. Which operations are supported for supported ValueTypes.
//  3. Cost thresholds for alternative implementations of certain operations.
//
// In addition it has a few other components, like information about FP
// immediates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/AddressingMode.h"
#include "llvm/Attributes.h"
#include "llvm/CallingConv.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineJumpTableInfo;
  template<typename T> class SmallVectorImpl;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  namespace Sched {
    enum Preference {
      None,             // No preference
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }
//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can accept
/// natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;

public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
  /// LegalizeTypeAction - This enum indicates whether a type is legal for a
  /// target, and if not, what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };
  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
  enum BooleanContent { // How the target represents true/false values.
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };
  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
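  // Illustrative example (not part of the interface): for a target that
  // reports ZeroOrNegativeOneBooleanContent for vector booleans, widening a
  // setcc result from v4i1 to v4i32 uses
  // getExtendForContent(getBooleanContents(true)), i.e. ISD::SIGN_EXTEND,
  // so every lane becomes 0 or -1 (all bits equal to bit 0).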
  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLowering(const TargetMachine &TM,
                          const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLowering();

  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return TD; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }
  // Return the pointer type for the given address space, defaults to
  // the pointer type from the data layout.
  // FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; }
  virtual MVT getShiftAmountTy(EVT LHSTy) const;
  /// isSelectExpensive - Return true if the select operation is expensive for
  /// this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind kind) const { return true; }
  /// shouldSplitVectorElementType - Return true if a vector of the given type
  /// should be split (TypeSplitVector) instead of promoted
  /// (TypePromoteInteger) during type legalization.
  virtual bool shouldSplitVectorElementType(EVT VT) const { return false; }
  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
  /// a sequence of several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// isSlowDivBypassed - Returns true if the target has indicated that at
  /// least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// getBypassSlowDivWidths - Returns the map of slow widths for division or
  /// remainder with the corresponding fast widths.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
  /// srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// isJumpExpensive() - Return true if Flow Control is an expensive operation
  /// that should be avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// isPredictableSelectExpensive - Return true if selects are only cheaper
  /// than branches if the branch is unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return predictableSelectIsExpensive;
  }
  /// getSetCCResultType - Return the ValueType of the result of SETCC
  /// operations. Also used to obtain the target's preferred type for
  /// the condition operand of SELECT and BRCOND nodes. In the case of
  /// BRCOND the argument passed is MVT::Other since there are no other
  /// operands to get a type hint from.
  virtual EVT getSetCCResultType(EVT VT) const;

  /// getCmpLibcallReturnType - Return the ValueType for comparison
  /// libcalls. Comparison libcalls include floating point comparison calls,
  /// and Ordered/Unordered check calls on floating point numbers.
  MVT::SimpleValueType getCmpLibcallReturnType() const;
  /// getBooleanContents - For targets without i1 registers, this gives the
  /// nature of the high bits of boolean values held in types wider than i1.
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.
  /// Some CPUs distinguish between vectors of boolean and scalars; the isVec
  /// parameter selects between the two kinds. For example on X86 a scalar
  /// boolean should be zero extended from i1, while the elements of a vector
  /// of booleans should be sign extended from i1.
  BooleanContent getBooleanContents(bool isVec) const {
    return isVec ? BooleanVectorContents : BooleanContents;
  }
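  // Illustrative sketch (hypothetical X86-like target, not prescribed by this
  // interface): a target typically configures these in its constructor via the
  // configuration setters declared further below, e.g.
  //
  //   setBooleanContents(ZeroOrOneBooleanContent);               // scalar: zext
  //   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // vector: sext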
  /// getSchedulingPreference - Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// getSchedulingPreference - Some schedulers, e.g. hybrid, can switch to
  /// different scheduling heuristics for different nodes. This function
  /// returns the preference (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }
  /// getRepRegClassFor - Return the 'representative' register class for the
  /// specified value type. The 'representative' register class is the largest
  /// legal super-reg register class for the register class of the value type.
  /// For example, on i386 the rep register class for i8, i16, and i32 is GR32;
  /// while the rep register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// getRepRegClassCostFor - Return the cost of the 'representative' register
  /// class for the specified value type.
  virtual uint8_t getRepRegClassCostFor(EVT VT) const {
    assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
    return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
  }
  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type. This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
  }
  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }
  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function. For types that must be promoted to larger types, this
  /// returns the larger type to promote to. For integer types that are larger
  /// than the largest integer register, this contains one step in the expansion
  /// to get to the smaller register. For illegal floating point types, this
  /// returns the integer type to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }
  /// getTypeToExpandTo - For types supported by the target, this is an
  /// identity function. For types that must be expanded (i.e. integer types
  /// that are larger than the largest integer register or illegal floating
  /// point types), this returns the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
  /// getVectorTypeBreakdown - Vector types are broken down into some number of
  /// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32
  /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack.
  /// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  EVT &RegisterVT) const;
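  // Worked example (assuming an SSE1-like target where v4f32 is legal):
  // asking for the breakdown of EVT::v8f32 would return 2, with
  // IntermediateVT == v4f32, NumIntermediates == 2 and RegisterVT == v4f32,
  // i.e. the value ends up carried in two vector registers.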
  /// getTgtMemIntrinsic - Given an intrinsic, checks if on the target the
  /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
  /// this is the case, it returns true and stores the intrinsic
  /// information into the IntrinsicInfo that was passed to the function.
  struct IntrinsicInfo {
    unsigned     opc;     // target opcode
    EVT          memVT;   // memory VT
    const Value* ptrVal;  // value representing memory location
    int          offset;  // offset off of ptrVal
    unsigned     align;   // alignment
    bool         vol;     // is volatile?
    bool         readMem; // reads memory?
    bool         writeMem;// writes memory?
  };

  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }
  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will materialize
  /// the FP immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// isIntImmLegal - Returns true if the target can instruction select the
  /// specified integer immediate natively (that is, it's materialized with one
  /// instruction). The current *assumption* in isel is that all integer
  /// immediates are "legal" and only the memcpy / memset expansion code is
  /// making use of this. The rest of isel doesn't have a proper cost model for
  /// immediate materialization.
  virtual bool isIntImmLegal(const APInt &/*Imm*/, EVT /*VT*/) const {
    return true;
  }
  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
  /// are assumed to be legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// canOpTrap - Returns true if the operation can trap for the value type.
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;
  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
  /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
  /// to replace a VAND with a constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }
  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op > array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }
  /// isOperationLegalOrCustom - Return true if the specified operation is
  /// legal on this target or can be made legal with custom lowering. This
  /// is used to help guide high-level lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }
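  // Illustrative use (sketch, not part of this interface): a DAG combine that
  // wants to introduce an ISD::FMA node would normally guard the transform,
  // e.g.
  //
  //   if (!TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     return SDValue();
  //
  // so it never creates an operation the target can neither select nor
  // custom-lower.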
  /// isOperationExpand - Return true if the specified operation is illegal on
  /// this target or unlikely to be made legal with custom lowering. This is
  /// used to help guide high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// isOperationLegal - Return true if the specified operation is legal on
  /// this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }
  /// getLoadExtAction - Return how this load with extension should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
  }

  /// isLoadExtLegal - Return true if the specified load with extension is
  /// legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() &&
      getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
  }
  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy];
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }
  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT) == Legal ||
       getIndexedLoadAction(IdxMode, VT) == Custom);
  }
  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT) == Legal ||
       getIndexedStoreAction(IdxMode, VT) == Custom);
  }
  /// getCondCodeAction - Return how the condition code should be treated:
  /// either it is legal, needs to be expanded to some other code sequence,
  /// or the target has a custom expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           (unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 &&
           "Table isn't big enough!");
    /// The lower 5 bits of SimpleTy select a 2-bit slot within a 64-bit
    /// element, and the remaining upper bits select which 64-bit element of
    /// the second array dimension to use.
    LegalizeAction Action = (LegalizeAction)
      ((CondCodeActions[CC][VT.SimpleTy >> 5] >> (2*(VT.SimpleTy & 0x1F))) & 3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// isCondCodeLegal - Return true if the specified condition code is legal
  /// on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return
      getCondCodeAction(CC, VT) == Legal ||
      getCondCodeAction(CC, VT) == Custom;
  }
  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }
  /// getValueType - Return the EVT corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size. If
  /// AllowUnknown is true, this will return MVT::Other for types with no EVT
  /// counterpart (e.g. structs), otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (Ty->isPointerTy()) return PointerTy;

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (Elm->isPointerTy())
        Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext());
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }
  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT();
  }

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area. This is the actual
  /// alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;
  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  EVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  EVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }
  /// getNumRegisters - Return the number of registers that this ValueType will
  /// eventually require. This is one for any types promoted to live in larger
  /// registers, but may be more than one for types (like i64) that are split
  /// into pieces. For types like i140, which are first promoted then expanded,
  /// it is the number of registers needed to hold all the bits of the original
  /// type. For an i140 on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1, VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
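  // Worked example for the extended-integer path above: an i140 value on a
  // 32-bit target needs ceil(140 / 32) == (140 + 31) / 32 == 5 registers,
  // matching the i140 example in the comment on getNumRegisters.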
  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// hasTargetDAGCombine - If true, the target has custom DAG combine
  /// transformations that it can perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memset
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset;
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
  }
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type. If true, it also returns whether the unaligned
  /// memory access is "fast" in the second argument by reference. This is
  /// used, for example, in situations where an array copy/move/set is
  /// converted to a sequence of store operations. Its use helps to ensure that
  /// such replacements don't generate code that causes an alignment error
  /// (trap) on the target machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  virtual bool allowsUnalignedMemoryAccesses(EVT, bool *Fast = 0) const {
    return false;
  }
  /// This function returns true if the target would benefit from code
  /// placement optimization.
  /// @brief Determine if the target should perform code placement optimization.
  bool shouldOptimizeCodePlacement() const {
    return benefitFromCodePlacementOpt;
  }
  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove
  /// lowering. If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
  /// against an alignment requirement, probably because the source does not
  /// need to be loaded. If 'IsZeroVal' is true, it is safe to return a
  /// non-scalar-integer type, e.g. empty string source, constant, or loaded
  /// from memory. 'MemcpyStrSrc' indicates whether the memcpy source is
  /// constant so it does not need to be loaded.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsZeroVal*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }
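  // Illustrative override (sketch for a hypothetical target, not a prescribed
  // implementation): a target with fast 128-bit vector stores might do
  //
  //   virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
  //                                   unsigned SrcAlign, bool IsZeroVal,
  //                                   bool MemcpyStrSrc,
  //                                   MachineFunction &MF) const {
  //     if (IsZeroVal && Size >= 16 && (DstAlign == 0 || DstAlign >= 16))
  //       return MVT::v4i32;   // emit 16-byte vector stores
  //     return MVT::Other;     // fall back to generic logic
  //   }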
  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// supportJumpTables - Return whether the target can generate code for
  /// jump tables.
  bool supportJumpTables() const {
    return SupportJumpTables;
  }

  /// getMinimumJumpTableEntries - Return the integer threshold on the number
  /// of blocks at which to use a jump table rather than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }
  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionPointerRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }
  /// getJumpBufSize - Returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200).
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - Returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0).
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getMinStackArgumentAlignment - Return the minimum stack alignment of an
  /// argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// getMinFunctionAlignment - Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// getPrefFunctionAlignment - Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// getPrefLoopAlignment - Return the preferred loop alignment.
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }
  /// getShouldFoldAtomicFences - Return whether the combiner should fold
  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
  bool getShouldFoldAtomicFences() const {
    return ShouldFoldAtomicFences;
  }

  /// getInsertFencesForAtomic - Return whether the DAG builder should
  /// automatically insert fences and reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }
  /// getPreIndexedAddressParts - Returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getPostIndexedAddressParts - Returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/, SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }
  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
  /// current function. The returned value is a member of the
  /// MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }
  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
  /// MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal. It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
  /// getStackCookieLocation - Return true if the target stores stack
  /// protector cookies at a fixed offset in some non-standard address
  /// space, and populates the address space and offset as
  /// appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
  /// used for loads / stores from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }
  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDValues for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }
    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer. If so, check to see if
    /// there are any bits set in the constant that are not demanded. If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
    /// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening
    /// cast, but it could be generalized for targets with other types of
    /// implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          DebugLoc dl);
  };
  /// SimplifyDemandedBits - Look at Op. At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream. If we can
  /// use this information to simplify Op, create a new simplified DAG node and
  /// return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;
  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;
    bool BeforeLegalizeOps;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
        CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    void RemoveFromWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                      bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };
  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc. If it is unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI, DebugLoc dl) const;

  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
  /// node is a GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
  /// PerformDAGCombine - This method will be invoked for all target nodes and
  /// for any target-independent nodes that the target has registered an
  /// interest in with setTargetDAGCombine.
  ///
  /// The semantics are as follows:
  ///   Return Value:
  ///     SDValue.Val == 0   - No change was made
  ///     SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///     otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
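  // Minimal override sketch (hypothetical MyTargetLowering, shown only to
  // illustrate the return-value contract described above):
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     default: break;
  //     case ISD::ADD:
  //       // ... try a target-specific fold; on success return the new value,
  //       // which tells the combiner to replace N with it.
  //       break;
  //     }
  //     return SDValue();   // SDValue.Val == 0: no change was made
  //   }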
  /// isTypeDesirableForOp - Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
  /// instruction encodings are longer and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
  /// the dag combiner to transform a floating point op of the specified opcode
  /// to an equivalent op of an integer type. e.g. f32 load -> i32 load can be
  /// profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// IsDesirableToPromoteOp - This method queries the target whether it is
  /// beneficial for the dag combiner to promote the specified node. If true,
  /// it should return the desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }
  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
  /// setBooleanContents - Specify how the target extends the result of a
  /// boolean value from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

  /// setBooleanVectorContents - Specify how the target extends the result
  /// of a vector boolean value from a vector of i1 to a wider type. See
  /// getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }
  /// setSchedulingPreference - Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// setSupportJumpTables - Indicate whether the target can generate code for
  /// jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }
  /// setMinimumJumpTableEntries - Indicate the minimum number of blocks for
  /// which to generate a jump table rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// setStackPointerRegisterToSaveRestore - If set to a physical register,
  /// this specifies the register that llvm.stacksave/llvm.stackrestore should
  /// save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }
  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// setJumpIsExpensive - Tells the code generator not to expand a sequence of
  /// operations into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// setIntDivIsCheap - Tells the code generator whether integer divide is
  /// cheap on this target; when it is not, divides should be replaced, where
  /// possible, by an alternate sequence of instructions not containing an
  /// integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
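  // Illustrative use (hypothetical target, shown as a sketch): a target whose
  // 64-bit divider is slow but whose 32-bit divide is fast could request a
  // runtime bypass in its constructor:
  //
  //   addBypassSlowDiv(64, 32);  // try a 32-bit divide when operands fit
  //
  // The generic code then consults getBypassSlowDivWidths() to emit the
  // guarded fast path.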
  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by a power of two, and to let the target
  /// handle it instead.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type. This indicates the selector can
  /// handle values of that class natively.
  void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
  }
  /// findRepresentativeClass - Return the largest legal super-reg register
  /// class of the register class for the specified type and its associated
  /// "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(EVT VT) const;

  /// computeRegisterProperties - Once all of the register classes are added,
  /// this allows us to compute derived properties we expose.
  void computeRegisterProperties();
  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }
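  // Typical configuration sketch (hypothetical MyTarget, purely illustrative):
  // a derived TargetLowering constructor registers its legal register classes
  // and then marks the operations that need special handling, e.g.
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPRRegClass);
  //   setOperationAction(ISD::SDIV,      MVT::i32, Expand);  // libcall/expansion
  //   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);  // LowerOperation hook
  //   setLoadExtAction(ISD::SEXTLOAD,    MVT::i1,  Promote);
  //   setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  //   computeRegisterProperties();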
  /// setLoadExtAction - Indicate that the specified load with extension does
  /// not work with the specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }
  /// setTruncStoreAction - Indicate that the specified truncating store does
  /// not work with the specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }
  /// setIndexedLoadAction - Indicate that the specified indexed load does or
  /// does not work with the specified type and indicate what to do about
  /// it. NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load action is kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }
  /// setIndexedStoreAction - Indicate that the specified indexed store does or
  /// does not work with the specified type and indicate what to do about
  /// it. NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store action is kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }
  /// setCondCodeAction - Indicate that the specified condition code is or
  /// isn't supported on the target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    /// The lower 5 bits of SimpleTy select a 2-bit slot within a 64-bit
    /// element, and the remaining upper bits select which 64-bit element of
    /// the second array dimension to use.
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
  }
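  // Worked example of the packing arithmetic above: for a type whose SimpleTy
  // is 37, the action lives in 64-bit element 37 >> 5 == 1, at bit offset
  // (37 & 0x1F) * 2 == 10, occupying two bits so the four LegalizeAction
  // values fit.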
  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works. If that default is insufficient, this method can be used
  /// by the target to override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
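  // Illustrative pairing (sketch): a target that has no i1 loads can promote
  // them and name the exact destination type instead of relying on the
  // "next larger type" default:
  //
  //   setOperationAction(ISD::LOAD, MVT::i1, Promote);
  //   AddPromotedToType(ISD::LOAD, MVT::i1, MVT::i8);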
  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
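  // Example registration (sketch): calling setTargetDAGCombine(ISD::ADD) and
  // setTargetDAGCombine(ISD::SHL) in a target's constructor makes the generic
  // DAG combiner hand every ADD and SHL node to that target's
  // PerformDAGCombine override (see the sketch after PerformDAGCombine above).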
  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }
  /// setMinFunctionAlignment - Set the target's minimum function alignment (in
  /// log2(bytes)).
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// setPrefFunctionAlignment - Set the target's preferred function alignment.
  /// This should be set if there is a performance benefit to
  /// higher-than-minimum alignment (in log2(bytes)).
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// setPrefLoopAlignment - Set the target's preferred loop alignment. A
  /// default alignment of zero means the target does not care about loop
  /// alignment. The alignment is specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
  /// argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }
  /// setShouldFoldAtomicFences - Set if the target's implementation of the
  /// atomic operation intrinsics includes locking. Default is false.
  void setShouldFoldAtomicFences(bool fold) {
    ShouldFoldAtomicFences = fold;
  }

  /// setInsertFencesForAtomic - Set if the DAG builder should
  /// automatically insert fences and reduce the order of atomic memory
  /// operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }
  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //
  /// LowerFormalArguments - This hook must be implemented to lower the
  /// incoming (formal) arguments, described by the Ins array, into the
  /// specified DAG. The implementation should fill in the InVals array
  /// with legal-type argument values, and return the resulting token
  /// chain value.
  virtual SDValue
    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                         bool /*isVarArg*/,
                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
                         DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
                         SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }
  struct ArgListEntry {
    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;
  /// CallLoweringInfo - This structure contains all information that is
  /// necessary for lowering calls. It is passed to TLI::LowerCallTo when the
  /// SelectionDAG builder needs to lower a call, and targets will see this
  /// struct in their LowerCall implementation.
  struct CallLoweringInfo {
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall;

    unsigned NumFixedArgs;
    CallingConv::ID CallConv;
    ImmutableCallSite *CS;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    /// CallLoweringInfo - Constructs a call lowering context based on the
    /// ImmutableCallSite \p cs.
    CallLoweringInfo(SDValue chain, Type *retTy,
                     FunctionType *FTy, bool isTailCall, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl,
                     ImmutableCallSite &cs)
      : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)),
        RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()),
        IsInReg(cs.paramHasAttr(0, Attributes::InReg)),
        DoesNotReturn(cs.doesNotReturn()),
        IsReturnValueUsed(!cs.getInstruction()->use_empty()),
        IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
        CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
        DL(dl), CS(&cs) {}

    /// CallLoweringInfo - Constructs a call lowering context based on the
    /// provided call information.
    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
                     CallingConv::ID callConv, bool isTailCall,
                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, DebugLoc dl)
      : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
        IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
        IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
        NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
        Args(args), DAG(dag), DL(dl), CS(NULL) {}
  };
  /// LowerCallTo - This function lowers an abstract call to a function into an
  /// actual call. This returns a pair of operands. The first element is the
  /// return value for the function (if RetTy is not VoidTy). The second
  /// element is the outgoing token chain. It calls LowerCall to do the actual
  /// lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// LowerCall - This hook must be implemented to lower calls into the
  /// specified DAG. The outgoing arguments to the call are described
  /// by the Outs array, and the values to be returned by the call are
  /// described by the Ins array. The implementation should fill in the
  /// InVals array with legal-type return values from the call, and return
  /// the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
  /// CanLowerReturn - This hook should be implemented to check whether the
  /// return values described by the Outs array can fit into the return
  /// registers. If false is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }
1413 /// LowerReturn - This hook must be implemented to lower outgoing
1414 /// return values, described by the Outs array, into the specified
1415 /// DAG. The implementation should return the resulting token chain
1419 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
1421 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
1422 const SmallVectorImpl<SDValue> &/*OutVals*/,
1423 DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
1424 llvm_unreachable("Not Implemented");
1425 }
1427 /// isUsedByReturnOnly - Return true if the result of the specified node is
1428 /// used by a return node only. It also computes and returns the input chain
1429 /// for the tail call.
1430 /// This is used to determine whether it is possible
1431 /// to codegen a libcall as tail call at legalization time.
1432 virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
1433 return false;
1434 }
1436 /// mayBeEmittedAsTailCall - Return true if the target may be able to emit the
1437 /// call instruction as a tail call. This is used by optimization passes to
1438 /// determine if it's profitable to duplicate return instructions to enable
1439 /// tailcall optimization.
1440 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
1441 return false;
1442 }
1444 /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
1445 /// sign extend a zeroext/signext integer argument or return value.
1446 /// FIXME: Most C calling conventions require the return type to be promoted,
1447 /// but this is not true all the time, e.g. i1 on x86-64. It is also not
1448 /// necessary for non-C calling conventions. The frontend should handle this
1449 /// and include all of the necessary information.
1450 virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
1451 ISD::NodeType /*ExtendKind*/) const {
1452 EVT MinVT = getRegisterType(Context, MVT::i32);
1453 return VT.bitsLT(MinVT) ? MinVT : VT;
1454 }
1456 /// LowerOperationWrapper - This callback is invoked by the type legalizer
1457 /// to legalize nodes with an illegal operand type but legal result types.
1458 /// It replaces the LowerOperation callback in the type Legalizer.
1459 /// The reason we can not do away with LowerOperation entirely is that
1460 /// LegalizeDAG isn't yet ready to use this callback.
1461 /// TODO: Consider merging with ReplaceNodeResults.
1463 /// The target places new result values for the node in Results (their number
1464 /// and types must exactly match those of the original return values of
1465 /// the node), or leaves Results empty, which indicates that the node is not
1466 /// to be custom lowered after all.
1467 /// The default implementation calls LowerOperation.
1468 virtual void LowerOperationWrapper(SDNode *N,
1469 SmallVectorImpl<SDValue> &Results,
1470 SelectionDAG &DAG) const;
1472 /// LowerOperation - This callback is invoked for operations that are
1473 /// unsupported by the target, which are registered to use 'custom' lowering,
1474 /// and whose defined values are all legal.
1475 /// If the target has no operations that require custom lowering, it need not
1476 /// implement this. The default implementation of this aborts.
1477 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
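// Illustrative sketch (hypothetical FooTargetLowering, not part of this
// interface): a backend normally marks an operation Custom in its constructor
// and then handles it here. LowerFSIN is a hypothetical helper.
//
//   // In the FooTargetLowering constructor:
//   setOperationAction(ISD::FSIN, MVT::f32, Custom);
//
//   SDValue FooTargetLowering::LowerOperation(SDValue Op,
//                                             SelectionDAG &DAG) const {
//     switch (Op.getOpcode()) {
//     case ISD::FSIN: return LowerFSIN(Op, DAG);
//     default: llvm_unreachable("unhandled custom lowering");
//     }
//   }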
1479 /// ReplaceNodeResults - This callback is invoked when a node result type is
1480 /// illegal for the target, and the operation was registered to use 'custom'
1481 /// lowering for that result type. The target places new result values for
1482 /// the node in Results (their number and types must exactly match those of
1483 /// the original return values of the node), or leaves Results empty, which
1484 /// indicates that the node is not to be custom lowered after all.
1486 /// If the target has no operations that require custom lowering, it need not
1487 /// implement this. The default implementation aborts.
1488 virtual void ReplaceNodeResults(SDNode * /*N*/,
1489 SmallVectorImpl<SDValue> &/*Results*/,
1490 SelectionDAG &/*DAG*/) const {
1491 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
1492 }
1494 /// getTargetNodeName() - This method returns the name of a target specific
1495 /// DAG node.
1496 virtual const char *getTargetNodeName(unsigned Opcode) const;
1498 /// createFastISel - This method returns a target specific FastISel object,
1499 /// or null if the target does not support "fast" ISel.
1500 virtual FastISel *createFastISel(FunctionLoweringInfo &,
1501 const TargetLibraryInfo *) const {
1502 return 0;
1503 }
1505 //===--------------------------------------------------------------------===//
1506 // Inline Asm Support hooks
1509 /// ExpandInlineAsm - This hook allows the target to expand an inline asm
1510 /// call to be explicit llvm code if it wants to. This is useful for
1511 /// turning simple inline asms into LLVM intrinsics, which gives the
1512 /// compiler more information about the behavior of the code.
1513 virtual bool ExpandInlineAsm(CallInst *) const {
1514 return false;
1515 }
1517 enum ConstraintType {
1518 C_Register, // Constraint represents specific register(s).
1519 C_RegisterClass, // Constraint represents any of register(s) in class.
1520 C_Memory, // Memory constraint.
1521 C_Other, // Something else.
1522 C_Unknown // Unsupported constraint.
1523 };
1525 enum ConstraintWeight {
1527 CW_Invalid = -1, // No match.
1528 CW_Okay = 0, // Acceptable.
1529 CW_Good = 1, // Good weight.
1530 CW_Better = 2, // Better weight.
1531 CW_Best = 3, // Best weight.
1533 // Well-known weights.
1534 CW_SpecificReg = CW_Okay, // Specific register operands.
1535 CW_Register = CW_Good, // Register operands.
1536 CW_Memory = CW_Better, // Memory operands.
1537 CW_Constant = CW_Best, // Constant operand.
1538 CW_Default = CW_Okay // Default or don't know type.
1539 };
1541 /// AsmOperandInfo - This contains information for each constraint that we are
1542 /// lowering.
1543 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
1544 /// ConstraintCode - This contains the actual string for the code, like "m".
1545 /// TargetLowering picks the 'best' code from ConstraintInfo::Codes that
1546 /// most closely matches the operand.
1547 std::string ConstraintCode;
1549 /// ConstraintType - Information about the constraint code, e.g. Register,
1550 /// RegisterClass, Memory, Other, Unknown.
1551 TargetLowering::ConstraintType ConstraintType;
1553 /// CallOperandVal - If this is the result output operand or a
1554 /// clobber, this is null, otherwise it is the incoming operand to the
1555 /// CallInst. This gets modified as the asm is processed.
1556 Value *CallOperandVal;
1558 /// ConstraintVT - The ValueType for the operand value.
1559 EVT ConstraintVT;
1561 /// isMatchingInputConstraint - Return true if this is an input operand that
1562 /// is a matching constraint like "4".
1563 bool isMatchingInputConstraint() const;
1565 /// getMatchedOperand - If this is an input matching constraint, this method
1566 /// returns the output operand it matches.
1567 unsigned getMatchedOperand() const;
1569 /// Copy constructor for copying from an AsmOperandInfo.
1570 AsmOperandInfo(const AsmOperandInfo &info)
1571 : InlineAsm::ConstraintInfo(info),
1572 ConstraintCode(info.ConstraintCode),
1573 ConstraintType(info.ConstraintType),
1574 CallOperandVal(info.CallOperandVal),
1575 ConstraintVT(info.ConstraintVT) {
1576 }
1578 /// Copy constructor for copying from a ConstraintInfo.
1579 AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
1580 : InlineAsm::ConstraintInfo(info),
1581 ConstraintType(TargetLowering::C_Unknown),
1582 CallOperandVal(0), ConstraintVT(MVT::Other) {
1583 }
1584 };
1586 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
1588 /// ParseConstraints - Split up the constraint string from the inline
1589 /// assembly value into the specific constraints and their prefixes,
1590 /// and also tie in the associated operand values.
1591 /// If this returns an empty vector, and if the constraint string itself
1592 /// isn't empty, there was an error parsing.
1593 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;
1595 /// Examine constraint type and operand type and determine a weight value.
1596 /// The operand object must already have been set up with the operand type.
1597 virtual ConstraintWeight getMultipleConstraintMatchWeight(
1598 AsmOperandInfo &info, int maIndex) const;
1600 /// Examine constraint string and operand type and determine a weight value.
1601 /// The operand object must already have been set up with the operand type.
1602 virtual ConstraintWeight getSingleConstraintMatchWeight(
1603 AsmOperandInfo &info, const char *constraint) const;
1605 /// ComputeConstraintToUse - Determines the constraint code and constraint
1606 /// type to use for the specific AsmOperandInfo, setting
1607 /// OpInfo.ConstraintCode and OpInfo.ConstraintType. If the actual operand
1608 /// being passed in is available, it can be passed in as Op, otherwise an
1609 /// empty SDValue can be passed.
1610 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
1611 SDValue Op,
1612 SelectionDAG *DAG = 0) const;
1614 /// getConstraintType - Given a constraint, return the type of constraint it
1615 /// is for this target.
1616 virtual ConstraintType getConstraintType(const std::string &Constraint) const;
1618 /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
1619 /// {edx}), return the register number and the register class for the
1620 /// register.
1622 /// Given a register class constraint, like 'r', if this corresponds directly
1623 /// to an LLVM register class, return a register of 0 and the register class
1624 /// matching the constraint.
1626 /// This should only be used for C_Register constraints. On error,
1627 /// this returns a register number of 0 and a null register class pointer.
1628 virtual std::pair<unsigned, const TargetRegisterClass*>
1629 getRegForInlineAsmConstraint(const std::string &Constraint,
1630 EVT VT) const;
1632 /// LowerXConstraint - try to replace an X constraint, which matches anything,
1633 /// with another that has more specific requirements based on the type of the
1634 /// corresponding operand. This returns null if there is no replacement to
1635 /// make.
1636 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
1638 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
1639 /// vector. If it is invalid, don't add anything to Ops.
1640 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
1641 std::vector<SDValue> &Ops,
1642 SelectionDAG &DAG) const;
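// Illustrative sketch (hypothetical FooTargetLowering): a backend commonly
// classifies its extra single-letter constraints in getConstraintType and
// resolves register constraints in getRegForInlineAsmConstraint.
//
//   TargetLowering::ConstraintType
//   FooTargetLowering::getConstraintType(const std::string &Constraint) const {
//     if (Constraint.size() == 1 && Constraint[0] == 'f')
//       return C_RegisterClass;   // Hypothetical FP-register constraint.
//     return TargetLowering::getConstraintType(Constraint);
//   }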
1644 //===--------------------------------------------------------------------===//
1645 // Instruction Emitting Hooks
1648 // EmitInstrWithCustomInserter - This method should be implemented by targets
1649 // that mark instructions with the 'usesCustomInserter' flag. These
1650 // instructions are special in various ways, which require special support to
1651 // insert. The specified MachineInstr is created but not inserted into any
1652 // basic blocks, and this method is called to expand it into a sequence of
1653 // instructions, potentially also creating new basic blocks and control flow.
1654 virtual MachineBasicBlock *
1655 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
1657 /// AdjustInstrPostInstrSelection - This method should be implemented by
1658 /// targets that mark instructions with the 'hasPostISelHook' flag. These
1659 /// instructions must be adjusted after instruction selection by target hooks.
1660 /// e.g. To fill in optional defs for ARM 's' setting instructions.
1661 virtual void
1662 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
1664 //===--------------------------------------------------------------------===//
1665 // Addressing mode description hooks (used by LSR etc).
1668 /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
1669 /// same BB as Load/Store instructions reading the address. This allows as
1670 /// much computation as possible to be done in the address mode for that
1671 /// operand. This hook lets targets also pass back when this should be done
1672 /// on intrinsics which load/store.
1673 virtual bool GetAddrModeArguments(IntrinsicInst *I,
1674 SmallVectorImpl<Value*> &Ops,
1675 Type *&AccessTy) const {
1676 return false;
1677 }
1679 /// isLegalAddressingMode - Return true if the addressing mode represented by
1680 /// AM is legal for this target, for a load/store of the specified type.
1681 /// The type may be VoidTy, in which case only return true if the addressing
1682 /// mode is legal for a load/store of any legal type.
1683 /// TODO: Handle pre/postinc as well.
1684 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
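// Illustrative sketch (hypothetical FooTargetLowering): a backend accepts only
// the addressing modes its loads/stores can encode, e.g. base register plus a
// signed 12-bit immediate with no global base and no scaled index. The 12-bit
// limit is an assumption made for this example.
//
//   bool FooTargetLowering::isLegalAddressingMode(const AddrMode &AM,
//                                                 Type *Ty) const {
//     if (AM.BaseGV || AM.Scale != 0)
//       return false;
//     return isInt<12>(AM.BaseOffs);
//   }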
1686 /// isLegalICmpImmediate - Return true if the specified immediate is legal
1687 /// icmp immediate, that is the target has icmp instructions which can compare
1688 /// a register against the immediate without having to materialize the
1689 /// immediate into a register.
1690 virtual bool isLegalICmpImmediate(int64_t) const {
1691 return true;
1692 }
1694 /// isLegalAddImmediate - Return true if the specified immediate is legal
1695 /// add immediate, that is the target has add instructions which can add
1696 /// a register with the immediate without having to materialize the
1697 /// immediate into a register.
1698 virtual bool isLegalAddImmediate(int64_t) const {
1699 return true;
1700 }
1702 /// isTruncateFree - Return true if it's free to truncate a value of
1703 /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
1704 /// register EAX to i16 by referencing its sub-register AX.
1705 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1706 return false;
1707 }
1709 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1710 return false;
1711 }
1713 /// isZExtFree - Return true if any actual instruction that defines a
1714 /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
1715 /// register. This does not necessarily include registers defined in
1716 /// unknown ways, such as incoming arguments, or copies from unknown
1717 /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
1718 /// does not necessarily apply to truncate instructions. e.g. on x86-64,
1719 /// all instructions that define 32-bit values implicitly zero-extend the
1720 /// result out to 64 bits.
1721 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1722 return false;
1723 }
1725 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1726 return false;
1727 }
1729 /// isZExtFree - Return true if zero-extending the specific node Val to type
1730 /// VT2 is free (either because it's implicitly zero-extended such as ARM
1731 /// ldrb / ldrh or because it's folded such as X86 zero-extending loads).
1732 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1733 return isZExtFree(Val.getValueType(), VT2);
1734 }
1736 /// isFNegFree - Return true if an fneg operation is free to the point where
1737 /// it is never worthwhile to replace it with a bitwise operation.
1738 virtual bool isFNegFree(EVT) const {
1739 return false;
1740 }
1742 /// isFAbsFree - Return true if an fabs operation is free to the point where
1743 /// it is never worthwhile to replace it with a bitwise operation.
1744 virtual bool isFAbsFree(EVT) const {
1745 return false;
1746 }
1748 /// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
1749 /// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
1750 /// FMAs when this method returns true (and FMAs are legal), otherwise fmuladd
1751 /// is expanded to mul + add.
1752 virtual bool isFMAFasterThanMulAndAdd(EVT) const {
1753 return false;
1754 }
1756 /// isNarrowingProfitable - Return true if it's profitable to narrow
1757 /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
1758 /// from i32 to i8 but not from i32 to i16.
1759 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1760 return false;
1761 }
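// Illustrative sketch (hypothetical FooTargetLowering): a 64-bit target whose
// 32-bit operations implicitly clear the upper half of the destination
// register might report i64->i32 truncation and i32->i64 zero-extension as
// free.
//
//   bool FooTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
//     return VT1 == MVT::i64 && VT2 == MVT::i32;
//   }
//   bool FooTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
//     return VT1 == MVT::i32 && VT2 == MVT::i64;
//   }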
1763 //===--------------------------------------------------------------------===//
1764 // Div utility functions
1766 SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
1767 SelectionDAG &DAG) const;
1768 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1769 std::vector<SDNode*> *Created) const;
1770 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
1771 std::vector<SDNode*> *Created) const;
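// Illustrative note (variable names hypothetical): DAGCombiner is the usual
// client of these helpers, e.g.
//
//   std::vector<SDNode*> Built;
//   SDValue Q = TLI.BuildSDIV(N, DAG, LegalOperations, &Built);
//   // If non-null, Q computes N's sdiv-by-constant as a multiply/shift
//   // sequence, and Built lists the nodes that were created for it.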
1774 //===--------------------------------------------------------------------===//
1775 // Runtime Library hooks
1778 /// setLibcallName - Rename the default libcall routine name for the specified
1779 /// libcall.
1780 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1781 LibcallRoutineNames[Call] = Name;
1782 }
1784 /// getLibcallName - Get the libcall routine name for the specified libcall.
1786 const char *getLibcallName(RTLIB::Libcall Call) const {
1787 return LibcallRoutineNames[Call];
1788 }
1790 /// setCmpLibcallCC - Override the default CondCode to be used to test the
1791 /// result of the comparison libcall against zero.
1792 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1793 CmpLibcallCCs[Call] = CC;
1794 }
1796 /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
1797 /// the comparison libcall against zero.
1798 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1799 return CmpLibcallCCs[Call];
1800 }
1802 /// setLibcallCallingConv - Set the CallingConv that should be used for the
1803 /// specified libcall.
1804 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1805 LibcallCallingConvs[Call] = CC;
1806 }
1808 /// getLibcallCallingConv - Get the CallingConv that should be used for the
1809 /// specified libcall.
1810 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1811 return LibcallCallingConvs[Call];
1812 }
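// Illustrative sketch (hypothetical target code): a backend typically
// configures its runtime library in its TargetLowering constructor. The
// routine name "__foo_sdiv64" is made up for this example.
//
//   setLibcallName(RTLIB::SDIV_I64, "__foo_sdiv64");
//   setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::C);
//   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);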
1815 const TargetMachine &TM;
1816 const DataLayout *TD;
1817 const TargetLoweringObjectFile &TLOF;
1819 /// PointerTy - The type to use for pointers for the default address space,
1820 /// usually i32 or i64.
1821 MVT PointerTy;
1824 /// IsLittleEndian - True if this is a little endian target.
1826 bool IsLittleEndian;
1828 /// SelectIsExpensive - Tells the code generator not to expand operations
1829 /// into sequences that use the select operations if possible.
1830 bool SelectIsExpensive;
1832 /// IntDivIsCheap - Tells the code generator not to expand integer divides by
1833 /// constants into a sequence of muls, adds, and shifts. This is a hack until
1834 /// a real cost model is in place. If we ever optimize for size, this will be
1835 /// set to true unconditionally.
1836 bool IntDivIsCheap;
1838 /// BypassSlowDivWidths - Tells the code generator to bypass slow divide or
1839 /// remainder instructions. For example, BypassSlowDivWidths[32,8] tells the
1840 /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned
1841 /// integer div/rem when the operands are positive and less than 256.
1842 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1844 /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
1845 /// srl/add/sra for a signed divide by power of two, and let the target handle
1846 /// it.
1847 bool Pow2DivIsCheap;
1849 /// JumpIsExpensive - Tells the code generator that it shouldn't generate
1850 /// extra flow control instructions and should attempt to combine flow
1851 /// control instructions via predication.
1852 bool JumpIsExpensive;
1854 /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
1855 /// llvm.setjmp. Defaults to false.
1856 bool UseUnderscoreSetJmp;
1858 /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
1859 /// llvm.longjmp. Defaults to false.
1860 bool UseUnderscoreLongJmp;
1862 /// SupportJumpTables - Whether the target can generate code for jumptables.
1863 /// If it's not true, then each jumptable must be lowered into if-then-else's.
1864 bool SupportJumpTables;
1866 /// MinimumJumpTableEntries - Minimum number of entries needed before a jump
1867 /// table is used rather than a chain of branches.
1867 int MinimumJumpTableEntries;
1869 /// BooleanContents - Information about the contents of the high-bits in
1870 /// boolean values held in a type wider than i1. See getBooleanContents.
1871 BooleanContent BooleanContents;
1872 /// BooleanVectorContents - Information about the contents of the high-bits
1873 /// in boolean vector values when the element type is wider than i1. See
1874 /// getBooleanContents.
1875 BooleanContent BooleanVectorContents;
1877 /// SchedPreferenceInfo - The target scheduling preference: shortest possible
1878 /// total cycles or lowest register usage.
1879 Sched::Preference SchedPreferenceInfo;
1881 /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers
1882 unsigned JumpBufSize;
1884 /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
1885 /// buffers.
1886 unsigned JumpBufAlignment;
1888 /// MinStackArgumentAlignment - The minimum alignment that any argument
1889 /// on the stack needs to have.
1891 unsigned MinStackArgumentAlignment;
1893 /// MinFunctionAlignment - The minimum function alignment (used when
1894 /// optimizing for size, and to prevent explicitly provided alignment
1895 /// from leading to incorrect code).
1897 unsigned MinFunctionAlignment;
1899 /// PrefFunctionAlignment - The preferred function alignment (used when
1900 /// alignment unspecified and optimizing for speed).
1902 unsigned PrefFunctionAlignment;
1904 /// PrefLoopAlignment - The preferred loop alignment.
1906 unsigned PrefLoopAlignment;
1908 /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
1909 /// be folded into the enclosed atomic intrinsic instruction by the
1910 /// combiner.
1911 bool ShouldFoldAtomicFences;
1913 /// InsertFencesForAtomic - Whether the DAG builder should automatically
1914 /// insert fences and reduce ordering for atomics. (This will be set for
1915 /// most architectures with weak memory ordering.)
1916 bool InsertFencesForAtomic;
1918 /// StackPointerRegisterToSaveRestore - If set to a physical register, this
1919 /// specifies the register that llvm.stacksave/llvm.stackrestore should save
1920 /// and restore.
1921 unsigned StackPointerRegisterToSaveRestore;
1923 /// ExceptionPointerRegister - If set to a physical register, this specifies
1924 /// the register that receives the exception address on entry to a landing
1925 /// pad.
1926 unsigned ExceptionPointerRegister;
1928 /// ExceptionSelectorRegister - If set to a physical register, this specifies
1929 /// the register that receives the exception typeid on entry to a landing
1930 /// pad.
1931 unsigned ExceptionSelectorRegister;
1933 /// RegClassForVT - This indicates the default register class to use for
1934 /// each ValueType the target supports natively.
1935 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1936 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1937 EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1939 /// RepRegClassForVT - This indicates the "representative" register class to
1940 /// use for each ValueType the target supports natively. This information is
1941 /// used by the scheduler to track register pressure. By default, the
1942 /// representative register class is the largest legal super-reg register
1943 /// class of the register class of the specified type. e.g. On x86, i8, i16,
1944 /// and i32's representative class would be GR32.
1945 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1947 /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
1948 /// register class for each ValueType. The cost is used by the scheduler to
1949 /// approximate register pressure.
1950 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1952 /// TransformToType - For any value types we are promoting or expanding, this
1953 /// contains the value type that we are changing to. For Expanded types, this
1954 /// contains one step of the expand (e.g. i64 -> i32), even if there are
1955 /// multiple steps required (e.g. i64 -> i16). For types natively supported
1956 /// by the system, this holds the same type (e.g. i32 -> i32).
1957 EVT TransformToType[MVT::LAST_VALUETYPE];
1959 /// OpActions - For each operation and each value type, keep a LegalizeAction
1960 /// that indicates how instruction selection should deal with the operation.
1961 /// Most operations are Legal (aka, supported natively by the target), but
1962 /// operations that are not should be described. Note that operations on
1963 /// non-legal value types are not described here.
1964 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1966 /// LoadExtActions - For each load extension type and each value type,
1967 /// keep a LegalizeAction that indicates how instruction selection should deal
1968 /// with a load of a specific value type and extension type.
1969 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1971 /// TruncStoreActions - For each value type pair keep a LegalizeAction that
1972 /// indicates whether a truncating store of a specific value type and
1973 /// truncating type is legal.
1974 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1976 /// IndexedModeActions - For each indexed mode and each value type,
1977 /// keep a pair of LegalizeAction that indicates how instruction
1978 /// selection should deal with the load / store. The first dimension is the
1979 /// value_type for the reference. The second dimension represents the various
1980 /// modes for load store.
1981 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1983 /// CondCodeActions - For each condition code (ISD::CondCode) keep a
1984 /// LegalizeAction that indicates how instruction selection should
1985 /// deal with the condition code.
1986 /// Because each CC action takes up 2 bits, we need to have the array size
1987 /// be large enough to fit all of the value types. This can be done by
1988 /// dividing the MVT::LAST_VALUETYPE by 32 and adding one.
1989 uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
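// For example (illustrative): with two bits per value type, the action for a
// condition code CC and a simple value type with index T is expected to live
// in CondCodeActions[CC][T / 32], at bit offset (T % 32) * 2, which is how the
// corresponding getter and setter would index this table.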
1991 ValueTypeActionImpl ValueTypeActions;
1994 LegalizeKind
1995 getTypeConversion(LLVMContext &Context, EVT VT) const {
1996 // If this is a simple type, use the ComputeRegisterProp mechanism.
1997 if (VT.isSimple()) {
1998 assert((unsigned)VT.getSimpleVT().SimpleTy <
1999 array_lengthof(TransformToType));
2000 EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
2001 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());
2003 assert(
2004 (!(NVT.isSimple() && LA != TypeLegal) ||
2005 ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
2006 && "Promote may not follow Expand or Promote");
2008 if (LA == TypeSplitVector)
2009 NVT = EVT::getVectorVT(Context, VT.getVectorElementType(),
2010 VT.getVectorNumElements() / 2);
2011 return LegalizeKind(LA, NVT);
2012 }
2014 // Handle Extended Scalar Types.
2015 if (!VT.isVector()) {
2016 assert(VT.isInteger() && "Float types must be simple");
2017 unsigned BitSize = VT.getSizeInBits();
2018 // First promote to a power-of-two size, then expand if necessary.
2019 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
2020 EVT NVT = VT.getRoundIntegerType(Context);
2021 assert(NVT != VT && "Unable to round integer VT");
2022 LegalizeKind NextStep = getTypeConversion(Context, NVT);
2023 // Avoid multi-step promotion.
2024 if (NextStep.first == TypePromoteInteger) return NextStep;
2025 // Return rounded integer type.
2026 return LegalizeKind(TypePromoteInteger, NVT);
2027 }
2029 return LegalizeKind(TypeExpandInteger,
2030 EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
2031 }
2033 // Handle vector types.
2034 unsigned NumElts = VT.getVectorNumElements();
2035 EVT EltVT = VT.getVectorElementType();
2037 // Vectors with only one element are always scalarized.
2038 if (NumElts == 1)
2039 return LegalizeKind(TypeScalarizeVector, EltVT);
2041 // Try to widen vector elements until a legal type is found.
2042 if (EltVT.isInteger()) {
2043 // Vectors with a number of elements that is not a power of two are always
2044 // widened, for example <3 x float> -> <4 x float>.
2045 if (!VT.isPow2VectorType()) {
2046 NumElts = (unsigned)NextPowerOf2(NumElts);
2047 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
2048 return LegalizeKind(TypeWidenVector, NVT);
2049 }
2051 // Examine the element type.
2052 LegalizeKind LK = getTypeConversion(Context, EltVT);
2054 // If type is to be expanded, split the vector.
2055 // <4 x i140> -> <2 x i140>
2056 if (LK.first == TypeExpandInteger)
2057 return LegalizeKind(TypeSplitVector,
2058 EVT::getVectorVT(Context, EltVT, NumElts / 2));
2060 // Promote the integer element types until a legal vector type is found
2061 // or until the element integer type is too big. If a legal type was not
2062 // found, fall back to the usual mechanism of widening/splitting the
2063 // vector.
2064 while (1) {
2065 // Increase the bitwidth of the element to the next pow-of-two
2066 // (which is greater than 8 bits).
2067 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
2068 ).getRoundIntegerType(Context);
2070 // Stop trying when getting a non-simple element type.
2071 // Note that vector elements may be greater than legal vector element
2072 // types. Example: X86 XMM registers hold 64bit element on 32bit systems.
2073 if (!EltVT.isSimple()) break;
2075 // Build a new vector type and check if it is legal.
2076 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
2077 // Found a legal promoted vector type.
2078 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
2079 return LegalizeKind(TypePromoteInteger,
2080 EVT::getVectorVT(Context, EltVT, NumElts));
2081 }
2082 }
2084 // Try to widen the vector until a legal type is found.
2085 // If there is no wider legal type, split the vector.
2086 while (1) {
2087 // Round up to the next power of 2.
2088 NumElts = (unsigned)NextPowerOf2(NumElts);
2090 // If there is no simple vector type with this many elements then there
2091 // cannot be a larger legal vector type. Note that this assumes that
2092 // there are no skipped intermediate vector types in the simple types.
2093 if (!EltVT.isSimple()) break;
2094 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
2095 if (LargerVector == MVT()) break;
2097 // If this type is legal then widen the vector.
2098 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
2099 return LegalizeKind(TypeWidenVector, LargerVector);
2100 }
2102 // Widen odd vectors to next power of two.
2103 if (!VT.isPow2VectorType()) {
2104 EVT NVT = VT.getPow2VectorType(Context);
2105 return LegalizeKind(TypeWidenVector, NVT);
2106 }
2108 // Vectors with illegal element types are expanded.
2109 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
2110 return LegalizeKind(TypeSplitVector, NVT);
2111 }
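// Worked example (illustrative): following the steps above, a one-element
// vector such as <1 x i32> is scalarized to i32, and an integer vector with a
// non-power-of-two element count such as <3 x i32> is widened to <4 x i32>
// before any further legalization is attempted.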
2114 std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;
2116 /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
2117 /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
2118 /// which sets a bit in this array.
2119 unsigned char
2120 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2122 /// PromoteToType - For operations that must be promoted to a specific type,
2123 /// this holds the destination type. This map should be sparse, so don't hold
2124 /// it as an array.
2126 /// Targets add entries to this map with AddPromotedToType(..), clients access
2127 /// this with getTypeToPromoteTo(..).
2128 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2129 PromoteToType;
2131 /// LibcallRoutineNames - Stores the name of each libcall.
2133 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
2135 /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
2136 /// of each of the comparison libcall against zero.
2137 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2139 /// LibcallCallingConvs - Stores the CallingConv that should be used for each
2140 /// libcall.
2141 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2144 /// When lowering \@llvm.memset this field specifies the maximum number of
2145 /// store operations that may be substituted for the call to memset. Targets
2146 /// must set this value based on the cost threshold for that target. Targets
2147 /// should assume that the memset will be done using as many of the largest
2148 /// store operations first, followed by smaller ones, if necessary, per
2149 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2150 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2151 /// store. This only applies to setting a constant array of a constant size.
2152 /// @brief Specify maximum number of store instructions per memset call.
2153 unsigned maxStoresPerMemset;
2155 /// Maximum number of store operations that may be substituted for the call
2156 /// to memset, used for functions with OptSize attribute.
2157 unsigned maxStoresPerMemsetOptSize;
2159 /// When lowering \@llvm.memcpy this field specifies the maximum number of
2160 /// store operations that may be substituted for a call to memcpy. Targets
2161 /// must set this value based on the cost threshold for that target. Targets
2162 /// should assume that the memcpy will be done using as many of the largest
2163 /// store operations first, followed by smaller ones, if necessary, per
2164 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2165 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
2166 /// and one 1-byte store. This only applies to copying a constant array of
2167 /// constant size.
2168 /// @brief Specify maximum number of store instructions per memcpy call.
2169 unsigned maxStoresPerMemcpy;
2171 /// Maximum number of store operations that may be substituted for a call
2172 /// to memcpy, used for functions with OptSize attribute.
2173 unsigned maxStoresPerMemcpyOptSize;
2175 /// When lowering \@llvm.memmove this field specifies the maximum number of
2176 /// store instructions that may be substituted for a call to memmove. Targets
2177 /// must set this value based on the cost threshold for that target. Targets
2178 /// should assume that the memmove will be done using as many of the largest
2179 /// store operations first, followed by smaller ones, if necessary, per
2180 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2181 /// with 8-bit alignment would result in nine 1-byte stores. This only
2182 /// applies to copying a constant array of constant size.
2183 /// @brief Specify maximum number of store instructions per memmove call.
2184 unsigned maxStoresPerMemmove;
2186 /// Maximum number of store instructions that may be substituted for a call
2187 /// to memmove, used for functions with OptSize attribute.
2188 unsigned maxStoresPerMemmoveOptSize;
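// Illustrative sketch (hypothetical values): a target's TargetLowering
// constructor typically seeds these thresholds, for example:
//
//   maxStoresPerMemset  = 16; maxStoresPerMemsetOptSize  = 8;
//   maxStoresPerMemcpy  = 8;  maxStoresPerMemcpyOptSize  = 4;
//   maxStoresPerMemmove = 8;  maxStoresPerMemmoveOptSize = 4;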
2190 /// This field specifies whether the target can benefit from code placement
2191 /// optimization.
2192 bool benefitFromCodePlacementOpt;
2194 /// predictableSelectIsExpensive - Tells the code generator that select is
2195 /// more expensive than a branch if the branch is usually predicted right.
2196 bool predictableSelectIsExpensive;
2199 /// isLegalRC - Return true if the value types that can be represented by the
2200 /// specified register class are all legal.
2201 bool isLegalRC(const TargetRegisterClass *RC) const;
2202 };
2204 /// GetReturnInfo - Given an LLVM IR type and return type attributes,
2205 /// compute the return value EVTs and flags, and optionally also
2206 /// the offsets, if the return value is being lowered to memory.
2207 void GetReturnInfo(Type* ReturnType, Attributes attr,
2208 SmallVectorImpl<ISD::OutputArg> &Outs,
2209 const TargetLowering &TLI);
2211 } // end llvm namespace
2213 #endif